Java Code Examples for org.apache.accumulo.core.client.BatchWriter#close()
The following examples, drawn from open source projects, show how to use org.apache.accumulo.core.client.BatchWriter#close(). The source file, originating project, and license are noted above each example.
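Before the examples, here is a minimal sketch of the close() lifecycle, assuming the Accumulo 2.x client API: close() performs a final flush of any buffered mutations and releases the writer's resources, and it can throw MutationsRejectedException if the servers rejected any mutations. Because BatchWriter implements AutoCloseable in 2.x, try-with-resources closes it automatically. The instance, credentials, and table name below are placeholders.

import java.util.Properties;

import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.data.Mutation;

public class BatchWriterCloseSketch {
    public static void main(String[] args) throws Exception {
        Properties props = Accumulo.newClientProperties()
                .to("myInstance", "zkhost:2181")   // placeholder instance name and ZooKeepers
                .as("user", "password")            // placeholder credentials
                .build();
        try (AccumuloClient client = Accumulo.newClient().from(props).build();
                BatchWriter writer = client.createBatchWriter("myTable")) {
            Mutation m = new Mutation("row1");
            m.put("cf", "cq", "value");
            writer.addMutation(m);
        } // close() runs implicitly here: final flush, then resources released
    }
}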
Example 1
Source File: MixedGeoAndGeoWaveTest.java From datawave with Apache License 2.0

private static void writeKeyValues(Connector connector, Multimap<BulkIngestKey,Value> keyValues) throws Exception {
    final TableOperations tops = connector.tableOperations();
    final Set<BulkIngestKey> biKeys = keyValues.keySet();
    for (final BulkIngestKey biKey : biKeys) {
        final String tableName = biKey.getTableName().toString();
        if (!tops.exists(tableName))
            tops.create(tableName);

        final BatchWriter writer = connector.createBatchWriter(tableName, new BatchWriterConfig());
        for (final Value val : keyValues.get(biKey)) {
            final Mutation mutation = new Mutation(biKey.getKey().getRow());
            mutation.put(biKey.getKey().getColumnFamily(), biKey.getKey().getColumnQualifier(),
                    biKey.getKey().getColumnVisibilityParsed(), biKey.getKey().getTimestamp(), val);
            writer.addMutation(mutation);
        }
        writer.close();
    }
}
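Since close() performs the final flush, write failures can surface at close time. The following is a hedged sketch (the helper and its names are hypothetical, not part of the project above) of catching MutationsRejectedException when closing:

import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.MutationsRejectedException;
import org.apache.accumulo.core.data.Mutation;

public class CloseHandlingSketch {
    // Hypothetical helper: write one mutation, then surface rejections from
    // the final flush that close() performs.
    static void writeAndClose(Connector connector, String tableName, Mutation mutation) throws Exception {
        BatchWriter writer = connector.createBatchWriter(tableName, new BatchWriterConfig());
        try {
            writer.addMutation(mutation);
        } finally {
            try {
                writer.close();
            } catch (MutationsRejectedException e) {
                // lists constraint violations among the rejected mutations
                System.err.println("Rejected: " + e.getConstraintViolationSummaries());
                throw e;
            }
        }
    }
}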
Example 2
Source File: AccumuloStorageTest.java From rya with Apache License 2.0

public void testSimpleOutput() throws Exception {
    BatchWriter batchWriter = connector.createBatchWriter(table, 10L, 10L, 2);
    Mutation row = new Mutation("row");
    row.put("cf", "cq", new Value(new byte[0]));
    batchWriter.addMutation(row);
    batchWriter.flush();
    batchWriter.close();

    String location = "accumulo://" + table + "?instance=" + instance + "&user=" + user
            + "&password=" + pwd + "&range=a|z&mock=true";
    AccumuloStorage storage = createAccumuloStorage(location);
    int count = 0;
    while (true) {
        Tuple next = storage.getNext();
        if (next == null)
            break;
        assertEquals(6, next.size());
        count++;
    }
    assertEquals(1, count);
}
Example 3
Source File: CountIT.java From accumulo-examples with Apache License 2.0

@Before
public void setupInstance() throws Exception {
    tableName = getUniqueNames(1)[0];
    client = Accumulo.newClient().from(getClientProperties()).build();
    client.tableOperations().create(tableName);
    BatchWriter bw = client.createBatchWriter(tableName, new BatchWriterConfig());
    ColumnVisibility cv = new ColumnVisibility();
    // / has 1 dir
    // /local has 2 dirs 1 file
    // /local/user1 has 2 files
    bw.addMutation(Ingest.buildMutation(cv, "/local", true, false, true, 272, 12345, null));
    bw.addMutation(Ingest.buildMutation(cv, "/local/user1", true, false, true, 272, 12345, null));
    bw.addMutation(Ingest.buildMutation(cv, "/local/user2", true, false, true, 272, 12345, null));
    bw.addMutation(Ingest.buildMutation(cv, "/local/file", false, false, false, 1024, 12345, null));
    bw.addMutation(Ingest.buildMutation(cv, "/local/file", false, false, false, 1024, 23456, null));
    bw.addMutation(Ingest.buildMutation(cv, "/local/user1/file1", false, false, false, 2024, 12345, null));
    bw.addMutation(Ingest.buildMutation(cv, "/local/user1/file2", false, false, false, 1028, 23456, null));
    bw.close();
}
Example 4
Source File: ChunkInputFormatIT.java From accumulo-examples with Apache License 2.0

@Test
public void testInfoWithoutChunks() throws Exception {
    client.tableOperations().create(tableName);
    BatchWriter bw = client.createBatchWriter(tableName, new BatchWriterConfig());
    for (Entry<Key,Value> e : baddata) {
        Key k = e.getKey();
        Mutation m = new Mutation(k.getRow());
        m.put(k.getColumnFamily(), k.getColumnQualifier(),
                new ColumnVisibility(k.getColumnVisibility()), k.getTimestamp(), e.getValue());
        bw.addMutation(m);
    }
    bw.close();

    assertEquals(0, CIFTester.main(tableName, CIFTester.TestBadData.class.getName()));
    assertEquals(1, assertionErrors.get(tableName).size());
}
Example 5
Source File: ChunkInputFormatIT.java From accumulo-examples with Apache License 2.0

@Test
public void testErrorOnNextWithoutClose() throws Exception {
    client.tableOperations().create(tableName);
    BatchWriter bw = client.createBatchWriter(tableName, new BatchWriterConfig());
    for (Entry<Key,Value> e : data) {
        Key k = e.getKey();
        Mutation m = new Mutation(k.getRow());
        m.put(k.getColumnFamily(), k.getColumnQualifier(),
                new ColumnVisibility(k.getColumnVisibility()), k.getTimestamp(), e.getValue());
        bw.addMutation(m);
    }
    bw.close();

    assertEquals(1, CIFTester.main(tableName, CIFTester.TestNoClose.class.getName()));
    assertEquals(1, assertionErrors.get(tableName).size());
    // this should actually exist, in addition to the dummy entry
    assertEquals(2, assertionErrors.get(tableName + "_map_ioexception").size());
}
Example 6
Source File: ChunkInputFormatIT.java From accumulo-examples with Apache License 2.0

@Test
public void test() throws Exception {
    client.tableOperations().create(tableName);
    BatchWriter bw = client.createBatchWriter(tableName, new BatchWriterConfig());
    for (Entry<Key,Value> e : data) {
        Key k = e.getKey();
        Mutation m = new Mutation(k.getRow());
        m.put(k.getColumnFamily(), k.getColumnQualifier(),
                new ColumnVisibility(k.getColumnVisibility()), k.getTimestamp(), e.getValue());
        bw.addMutation(m);
    }
    bw.close();

    assertEquals(0, CIFTester.main(tableName, CIFTester.TestMapper.class.getName()));
    assertEquals(1, assertionErrors.get(tableName).size());
}
Example 7
Source File: AccumuloStorageTest.java From rya with Apache License 2.0

public void testColumns() throws Exception {
    BatchWriter batchWriter = connector.createBatchWriter(table, 10L, 10L, 2);
    Mutation row = new Mutation("a");
    row.put("cf1", "cq", new Value(new byte[0]));
    row.put("cf2", "cq", new Value(new byte[0]));
    row.put("cf3", "cq1", new Value(new byte[0]));
    row.put("cf3", "cq2", new Value(new byte[0]));
    batchWriter.addMutation(row);
    batchWriter.flush();
    batchWriter.close();

    String location = "accumulo://" + table + "?instance=" + instance + "&user=" + user
            + "&password=" + pwd + "&range=a|c&columns=cf1,cf3|cq1&mock=true";
    AccumuloStorage storage = createAccumuloStorage(location);
    int count = 0;
    while (true) {
        Tuple next = storage.getNext();
        if (next == null)
            break;
        assertEquals(6, next.size());
        count++;
    }
    assertEquals(2, count);
}
Example 8
Source File: AccumuloStorageTest.java From rya with Apache License 2.0

public void testWholeRowRange() throws Exception {
    BatchWriter batchWriter = connector.createBatchWriter(table, 10L, 10L, 2);
    Mutation row = new Mutation("a");
    row.put("cf1", "cq", new Value(new byte[0]));
    row.put("cf2", "cq", new Value(new byte[0]));
    row.put("cf3", "cq1", new Value(new byte[0]));
    row.put("cf3", "cq2", new Value(new byte[0]));
    batchWriter.addMutation(row);
    batchWriter.flush();
    batchWriter.close();

    String location = "accumulo://" + table + "?instance=" + instance + "&user=" + user
            + "&password=" + pwd + "&range=a&mock=true";
    AccumuloStorage storage = createAccumuloStorage(location);
    int count = 0;
    while (true) {
        Tuple next = storage.getNext();
        if (next == null)
            break;
        assertEquals(6, next.size());
        count++;
    }
    assertEquals(4, count);
}
Example 9
Source File: ExceededOrThresholdMarkerJexlNodeTest.java From datawave with Apache License 2.0

private static void writeKeyValues(Connector connector, Multimap<BulkIngestKey,Value> keyValues) throws Exception {
    final TableOperations tops = connector.tableOperations();
    final Set<BulkIngestKey> biKeys = keyValues.keySet();
    for (final BulkIngestKey biKey : biKeys) {
        final String tableName = biKey.getTableName().toString();
        if (!tops.exists(tableName))
            tops.create(tableName);

        final BatchWriter writer = connector.createBatchWriter(tableName, new BatchWriterConfig());
        for (final Value val : keyValues.get(biKey)) {
            final Mutation mutation = new Mutation(biKey.getKey().getRow());
            mutation.put(biKey.getKey().getColumnFamily(), biKey.getKey().getColumnQualifier(),
                    biKey.getKey().getColumnVisibilityParsed(), biKey.getKey().getTimestamp(), val);
            writer.addMutation(mutation);
        }
        writer.close();
    }
}
Example 10
Source File: AbstractFunctionalQuery.java From datawave with Apache License 2.0

protected Multimap<String,Key> removeMetadataEntries(Set<String> fields, Text cf)
        throws AccumuloSecurityException, AccumuloException, TableNotFoundException {
    Multimap<String,Key> metadataEntries = HashMultimap.create();
    MultiTableBatchWriter multiTableWriter = connector.createMultiTableBatchWriter(new BatchWriterConfig());
    BatchWriter writer = multiTableWriter.getBatchWriter(QueryTestTableHelper.METADATA_TABLE_NAME);
    for (String field : fields) {
        Mutation mutation = new Mutation(new Text(field));
        Scanner scanner = connector.createScanner(QueryTestTableHelper.METADATA_TABLE_NAME, new Authorizations());
        scanner.fetchColumnFamily(cf);
        scanner.setRange(new Range(new Text(field)));
        boolean foundEntries = false;
        for (Map.Entry<Key,Value> entry : scanner) {
            foundEntries = true;
            metadataEntries.put(field, entry.getKey());
            mutation.putDelete(entry.getKey().getColumnFamily(), entry.getKey().getColumnQualifier(),
                    entry.getKey().getColumnVisibilityParsed());
        }
        scanner.close();
        if (foundEntries) {
            writer.addMutation(mutation);
        }
    }
    writer.close();
    connector.tableOperations().compact(QueryTestTableHelper.METADATA_TABLE_NAME, new Text("\0"), new Text("~"), true, true);
    return metadataEntries;
}
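This example and Example 14 obtain their BatchWriter from a MultiTableBatchWriter but close only the per-table writer. As a hedged sketch of the enclosing handle's lifecycle (the table names are hypothetical, and the closing behavior is my understanding of the Accumulo API, not something the project above documents): closing the MultiTableBatchWriter flushes and closes the writers for every table it manages.

import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.MultiTableBatchWriter;
import org.apache.accumulo.core.data.Mutation;

public class MultiTableCloseSketch {
    static void multiTableWrite(Connector connector, Mutation m1, Mutation m2) throws Exception {
        MultiTableBatchWriter mtbw = connector.createMultiTableBatchWriter(new BatchWriterConfig());
        try {
            BatchWriter w1 = mtbw.getBatchWriter("tableA"); // hypothetical table names
            BatchWriter w2 = mtbw.getBatchWriter("tableB");
            w1.addMutation(m1);
            w2.addMutation(m2);
        } finally {
            mtbw.close(); // assumption: flushes and closes the writers for all tables
        }
    }
}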
Example 11
Source File: GeoSortedQueryDataTest.java From datawave with Apache License 2.0

private static void writeKeyValues(Connector connector, Multimap<BulkIngestKey,Value> keyValues) throws Exception {
    final TableOperations tops = connector.tableOperations();
    final Set<BulkIngestKey> biKeys = keyValues.keySet();
    for (final BulkIngestKey biKey : biKeys) {
        final String tableName = biKey.getTableName().toString();
        if (!tops.exists(tableName))
            tops.create(tableName);

        final BatchWriter writer = connector.createBatchWriter(tableName, new BatchWriterConfig());
        for (final Value val : keyValues.get(biKey)) {
            final Mutation mutation = new Mutation(biKey.getKey().getRow());
            mutation.put(biKey.getKey().getColumnFamily(), biKey.getKey().getColumnQualifier(),
                    biKey.getKey().getColumnVisibilityParsed(), val);
            writer.addMutation(mutation);
        }
        writer.close();
    }
}
Example 12
Source File: CompositeIndexTest.java From datawave with Apache License 2.0

private static void writeKeyValues(Connector connector, Multimap<BulkIngestKey,Value> keyValues) throws Exception {
    final TableOperations tops = connector.tableOperations();
    final Set<BulkIngestKey> biKeys = keyValues.keySet();
    for (final BulkIngestKey biKey : biKeys) {
        final String tableName = biKey.getTableName().toString();
        if (!tops.exists(tableName))
            tops.create(tableName);

        final BatchWriter writer = connector.createBatchWriter(tableName, new BatchWriterConfig());
        for (final Value val : keyValues.get(biKey)) {
            final Mutation mutation = new Mutation(biKey.getKey().getRow());
            mutation.put(biKey.getKey().getColumnFamily(), biKey.getKey().getColumnQualifier(),
                    biKey.getKey().getColumnVisibilityParsed(), biKey.getKey().getTimestamp(), val);
            writer.addMutation(mutation);
        }
        writer.close();
    }
}
Example 13
Source File: MultiValueCompositeIndexTest.java From datawave with Apache License 2.0

private static void writeKeyValues(Connector connector, Multimap<BulkIngestKey,Value> keyValues) throws Exception {
    final TableOperations tops = connector.tableOperations();
    final Set<BulkIngestKey> biKeys = keyValues.keySet();
    for (final BulkIngestKey biKey : biKeys) {
        final String tableName = biKey.getTableName().toString();
        if (!tops.exists(tableName))
            tops.create(tableName);

        final BatchWriter writer = connector.createBatchWriter(tableName, new BatchWriterConfig());
        for (final Value val : keyValues.get(biKey)) {
            final Mutation mutation = new Mutation(biKey.getKey().getRow());
            mutation.put(biKey.getKey().getColumnFamily(), biKey.getKey().getColumnQualifier(),
                    biKey.getKey().getColumnVisibilityParsed(), biKey.getKey().getTimestamp(), val);
            writer.addMutation(mutation);
        }
        writer.close();
    }
}
Example 14
Source File: AbstractFunctionalQuery.java From datawave with Apache License 2.0

protected void addMetadataEntries(Multimap<String,Key> metadataEntries)
        throws AccumuloSecurityException, AccumuloException, TableNotFoundException {
    MultiTableBatchWriter multiTableWriter = connector.createMultiTableBatchWriter(new BatchWriterConfig());
    BatchWriter writer = multiTableWriter.getBatchWriter(QueryTestTableHelper.METADATA_TABLE_NAME);
    for (String field : metadataEntries.keySet()) {
        Mutation mutation = new Mutation(new Text(field));
        for (Key key : metadataEntries.get(field)) {
            metadataEntries.put(field, key);
            mutation.put(key.getColumnFamily(), key.getColumnQualifier(), key.getColumnVisibilityParsed(), new Value());
        }
        writer.addMutation(mutation);
    }
    writer.close();
    connector.tableOperations().compact(QueryTestTableHelper.METADATA_TABLE_NAME, new Text("\0"), new Text("~"), true, true);
}
Example 15
Source File: FileCount.java From accumulo-examples with Apache License 2.0

public void run() throws Exception {
    entriesScanned = 0;
    inserts = 0;
    Scanner scanner = client.createScanner(tableName, auths);
    scanner.setBatchSize(scanOpts.scanBatchSize);
    BatchWriter bw = client.createBatchWriter(tableName, bwOpts.getBatchWriterConfig());

    long t1 = System.currentTimeMillis();
    int depth = findMaxDepth(scanner);
    long t2 = System.currentTimeMillis();

    for (int d = depth; d > 0; d--) {
        calculateCounts(scanner, d, bw);
        // must flush so next depth can read what prev depth wrote
        bw.flush();
    }
    bw.close();

    long t3 = System.currentTimeMillis();

    System.out.printf("Max depth : %d%n", depth);
    System.out.printf("Time to find max depth : %,d ms%n", (t2 - t1));
    System.out.printf("Time to compute counts : %,d ms%n", (t3 - t2));
    System.out.printf("Entries scanned : %,d %n", entriesScanned);
    System.out.printf("Counts inserted : %,d %n", inserts);
}
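This example calls flush() between depth passes and close() only once at the end. A short sketch of the distinction, assuming standard BatchWriter semantics:

import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.data.Mutation;

public class FlushVersusCloseSketch {
    // flush() keeps the writer usable; close() ends its life.
    static void flushThenClose(BatchWriter writer, Mutation first, Mutation second) throws Exception {
        writer.addMutation(first);
        writer.flush();             // buffered mutations become readable by scanners; writer still usable
        writer.addMutation(second); // still valid after flush()
        writer.close();             // final flush, then resources are released
        // any further addMutation() would fail: the writer is closed
    }
}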
Example 16
Source File: Ingest.java From accumulo-examples with Apache License 2.0

public static void main(String[] args) throws Exception {
    Opts opts = new Opts();
    BatchWriterOpts bwOpts = new BatchWriterOpts();
    opts.parseArgs(Ingest.class.getName(), args, bwOpts);

    try (AccumuloClient client = opts.createAccumuloClient()) {
        if (!client.tableOperations().exists(opts.nameTable))
            client.tableOperations().create(opts.nameTable);
        if (!client.tableOperations().exists(opts.indexTable))
            client.tableOperations().create(opts.indexTable);
        if (!client.tableOperations().exists(opts.dataTable)) {
            client.tableOperations().create(opts.dataTable);
            client.tableOperations().attachIterator(opts.dataTable,
                    new IteratorSetting(1, ChunkCombiner.class));
        }

        BatchWriter dirBW = client.createBatchWriter(opts.nameTable, bwOpts.getBatchWriterConfig());
        BatchWriter indexBW = client.createBatchWriter(opts.indexTable, bwOpts.getBatchWriterConfig());
        BatchWriter dataBW = client.createBatchWriter(opts.dataTable, bwOpts.getBatchWriterConfig());

        FileDataIngest fdi = new FileDataIngest(opts.chunkSize, opts.visibility);
        for (String dir : opts.directories) {
            recurse(new File(dir), opts.visibility, dirBW, indexBW, fdi, dataBW);

            // fill in parent directory info
            int slashIndex = -1;
            while ((slashIndex = dir.lastIndexOf("/")) > 0) {
                dir = dir.substring(0, slashIndex);
                ingest(new File(dir), opts.visibility, dirBW, indexBW, fdi, dataBW);
            }
        }
        ingest(new File("/"), opts.visibility, dirBW, indexBW, fdi, dataBW);

        dirBW.close();
        indexBW.close();
        dataBW.close();
    }
}
Example 17
Source File: MutationBuilderIT.java From fluo with Apache License 2.0

@Test
public void testBatchWrite() throws Exception {
    // test initializing a Fluo table by batch writing to it
    // use a batch writer to test this because it's easier than using AccumuloOutputFormat
    BatchWriter bw = aClient.createBatchWriter(table, new BatchWriterConfig());
    try {
        FluoMutationGenerator mb1 = new FluoMutationGenerator(Bytes.of("row1"));
        mb1.put(new Column("cf1", "cq1"), Bytes.of("v1"));
        mb1.put(new Column("cf1", "cq2"), Bytes.of("v2"));
        mb1.put(new Column("cf1", "cq3"), Bytes.of("v3"));
        bw.addMutation(mb1.build());

        FluoMutationGenerator mb2 = new FluoMutationGenerator(Bytes.of("row2"));
        mb2.put(new Column("cf1", "cq1"), Bytes.of("v4"));
        mb2.put(new Column("cf1", "cq2"), Bytes.of("v5"));
        bw.addMutation(mb2.build());
    } finally {
        bw.close();
    }

    TestTransaction tx1 = new TestTransaction(env);
    TestTransaction tx2 = new TestTransaction(env);

    Assert.assertEquals("v1", tx1.gets("row1", new Column("cf1", "cq1")));
    Assert.assertEquals("v2", tx1.gets("row1", new Column("cf1", "cq2")));
    Assert.assertEquals("v3", tx1.gets("row1", new Column("cf1", "cq3")));
    Assert.assertEquals("v4", tx1.gets("row2", new Column("cf1", "cq1")));
    Assert.assertEquals("v5", tx1.gets("row2", new Column("cf1", "cq2")));

    tx1.set("row1", new Column("cf1", "cq2"), "v6");
    tx1.delete("row1", new Column("cf1", "cq3"));
    tx1.set("row2", new Column("cf1", "cq2"), "v7");
    tx1.done();

    // tx2 should not see changes from tx1
    Assert.assertEquals("v1", tx2.gets("row1", new Column("cf1", "cq1")));
    Assert.assertEquals("v2", tx2.gets("row1", new Column("cf1", "cq2")));
    Assert.assertEquals("v3", tx2.gets("row1", new Column("cf1", "cq3")));
    Assert.assertEquals("v4", tx2.gets("row2", new Column("cf1", "cq1")));
    Assert.assertEquals("v5", tx2.gets("row2", new Column("cf1", "cq2")));

    TestTransaction tx3 = new TestTransaction(env);

    // should see changes from tx1
    Assert.assertEquals("v1", tx3.gets("row1", new Column("cf1", "cq1")));
    Assert.assertEquals("v6", tx3.gets("row1", new Column("cf1", "cq2")));
    Assert.assertNull(tx3.gets("row1", new Column("cf1", "cq3")));
    Assert.assertEquals("v4", tx3.gets("row2", new Column("cf1", "cq1")));
    Assert.assertEquals("v7", tx3.gets("row2", new Column("cf1", "cq2")));
}
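This example guards close() with try/finally. In releases where BatchWriter implements AutoCloseable (it does in Accumulo 2.x), the same guarantee can be written as try-with-resources; a sketch reusing the names from this example:

// Equivalent pattern with try-with-resources; assumes BatchWriter
// implements AutoCloseable, as in Accumulo 2.x.
try (BatchWriter bw = aClient.createBatchWriter(table, new BatchWriterConfig())) {
    FluoMutationGenerator mb1 = new FluoMutationGenerator(Bytes.of("row1"));
    mb1.put(new Column("cf1", "cq1"), Bytes.of("v1"));
    bw.addMutation(mb1.build());
} // close() runs automatically, even if addMutation throws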
Example 18
Source File: WebSocketClientIT.java From qonduit with Apache License 2.0

private void doScan(WebSocketClient client) throws Exception {
    long now = System.currentTimeMillis();
    String tableName = "qonduit.scanTest";
    Connector con = mac.getConnector(MAC_ROOT_USER, MAC_ROOT_PASSWORD);
    con.namespaceOperations().create("qonduit");
    con.tableOperations().create(tableName);

    BatchWriterConfig bwc = new BatchWriterConfig();
    bwc.setMaxLatency(2, TimeUnit.SECONDS);
    BatchWriter writer = con.createBatchWriter(tableName, bwc);

    ColumnVisibility cv = new ColumnVisibility();
    for (int i = 0; i < 10; i++) {
        Mutation m = new Mutation("m" + i);
        m.put("cf" + i, "cq" + i, cv, now + i, Integer.toString(i));
        writer.addMutation(m);
    }
    writer.flush();
    writer.close();
    sleepUninterruptibly(2, TimeUnit.SECONDS);

    List<byte[]> responses = new ArrayList<>();
    String id = UUID.randomUUID().toString();
    ScanRequest request = new ScanRequest();
    request.setRequestId(id);
    request.setTableName(tableName);
    request.setResultBatchSize(5);
    doIt(client, request, responses, 3);

    Assert.assertEquals(11, responses.size());
    for (byte[] b : responses) {
        KVPair kv = JsonSerializer.getObjectMapper().readValue(b, KVPair.class);
        Value val = kv.getValue();
        if (null != val) {
            int num = Integer.parseInt(new String(val.getValue()));
            Key key = kv.getKey().toKey();
            Assert.assertEquals("m" + num, key.getRow().toString());
            Assert.assertEquals("cf" + num, key.getColumnFamily().toString());
            Assert.assertEquals("cq" + num, key.getColumnQualifier().toString());
            Assert.assertEquals(now + num, key.getTimestamp());
            Assert.assertEquals(id, kv.getRequestId());
        } else {
            Assert.assertTrue(kv.isEndOfResults());
        }
    }
}
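This example tunes the writer with setMaxLatency before flushing and closing. A brief sketch of the commonly tuned BatchWriterConfig settings (the values are illustrative, not recommendations):

import java.util.concurrent.TimeUnit;

import org.apache.accumulo.core.client.BatchWriterConfig;

public class BatchWriterConfigSketch {
    static BatchWriterConfig tunedConfig() {
        BatchWriterConfig config = new BatchWriterConfig();
        config.setMaxMemory(10_000_000L);          // bytes buffered before an automatic flush
        config.setMaxLatency(2, TimeUnit.SECONDS); // longest a mutation may wait in the buffer
        config.setMaxWriteThreads(4);              // threads used to send mutations
        config.setTimeout(60, TimeUnit.SECONDS);   // fail if a server is unresponsive this long
        return config;
    }
}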
Example 19
Source File: ChunkInputStreamIT.java From accumulo-examples with Apache License 2.0

@Test
public void testWithAccumulo() throws AccumuloException, AccumuloSecurityException,
        TableExistsException, TableNotFoundException, IOException {
    client.tableOperations().create(tableName);
    BatchWriter bw = client.createBatchWriter(tableName, new BatchWriterConfig());

    for (Entry<Key,Value> e : data) {
        Key k = e.getKey();
        Mutation m = new Mutation(k.getRow());
        m.put(k.getColumnFamily(), k.getColumnQualifier(),
                new ColumnVisibility(k.getColumnVisibility()), e.getValue());
        bw.addMutation(m);
    }
    bw.close();

    Scanner scan = client.createScanner(tableName, AUTHS);

    ChunkInputStream cis = new ChunkInputStream();
    byte[] b = new byte[20];
    int read;
    PeekingIterator<Entry<Key,Value>> pi = Iterators.peekingIterator(scan.iterator());

    cis.setSource(pi);
    assertEquals(read = cis.read(b), 8);
    assertEquals(new String(b, 0, read), "asdfjkl;");
    assertEquals(read = cis.read(b), -1);

    cis.setSource(pi);
    assertEquals(read = cis.read(b), 10);
    assertEquals(new String(b, 0, read), "qwertyuiop");
    assertEquals(read = cis.read(b), -1);
    assertEquals(cis.getVisibilities().toString(), "[A&B, B&C, D]");
    cis.close();

    cis.setSource(pi);
    assertEquals(read = cis.read(b), 16);
    assertEquals(new String(b, 0, read), "asdfjkl;asdfjkl;");
    assertEquals(read = cis.read(b), -1);
    assertEquals(cis.getVisibilities().toString(), "[A&B]");
    cis.close();

    cis.setSource(pi);
    assertEquals(read = cis.read(b), -1);
    cis.close();

    cis.setSource(pi);
    assertEquals(read = cis.read(b), 8);
    assertEquals(new String(b, 0, read), "asdfjkl;");
    assertEquals(read = cis.read(b), -1);
    cis.close();

    assertFalse(pi.hasNext());
}
Example 20
Source File: NumShardsTest.java From datawave with Apache License 2.0

@Test
public void testUpdateCacheWithoutEntries() throws AccumuloException, AccumuloSecurityException,
        TableExistsException, TableNotFoundException, IOException, ParseException {
    // configure mock accumulo instance and populate with a couple of multiple numshards entries
    PasswordToken noPasswordToken = new PasswordToken();
    InMemoryInstance i = new InMemoryInstance("mock2");
    Connector connector = i.getConnector("root", noPasswordToken);

    Configuration conf = new Configuration();
    conf.set(AccumuloHelper.USERNAME, "root");
    conf.set(AccumuloHelper.INSTANCE_NAME, "mock2");
    conf.set(AccumuloHelper.PASSWORD, noPasswordToken.toString());
    conf.set(AccumuloHelper.ZOOKEEPERS, i.getZooKeepers());
    conf.set(ShardedDataTypeHandler.METADATA_TABLE_NAME, TableName.METADATA);

    connector.tableOperations().create(conf.get(ShardedDataTypeHandler.METADATA_TABLE_NAME));
    BatchWriter recordWriter = connector.createBatchWriter(conf.get(ShardedDataTypeHandler.METADATA_TABLE_NAME),
            new BatchWriterConfig());

    // write a couple of entries for multiple numshards
    Mutation m = new Mutation(NumShards.NUM_SHARDS);
    m.put(NumShards.NUM_SHARDS_CF + "blah", "20171102_19", "");
    recordWriter.addMutation(m);
    recordWriter.close();

    File multipleNumShardCache = File.createTempFile("numshards", ".txt");
    multipleNumShardCache.deleteOnExit();

    conf.set(ShardIdFactory.NUM_SHARDS, "11");
    conf.set(NumShards.ENABLE_MULTIPLE_NUMSHARDS, "true");
    conf.set(NumShards.MULTIPLE_NUMSHARDS_CACHE_PATH, multipleNumShardCache.getParent());

    AccumuloHelper mockedAccumuloHelper = EasyMock.createMock(AccumuloHelper.class);
    mockedAccumuloHelper.setup(conf);
    EasyMock.expectLastCall();
    EasyMock.expect(mockedAccumuloHelper.getConnector()).andReturn(connector);
    EasyMock.replay(mockedAccumuloHelper);

    NumShards numShards = new NumShards(conf);

    // these should create numshards.txt file based on multiple numshards entries in mock accumulo
    numShards.setaHelper(mockedAccumuloHelper);
    numShards.updateCache();

    assertEquals(11, numShards.getNumShards(0));
    assertEquals(11, numShards.getNumShards(Long.MAX_VALUE));
    assertEquals(11, numShards.getNumShards(""));
    assertEquals(11, numShards.getNumShards("20171102"));
    assertEquals(11, numShards.getMinNumShards());
    assertEquals(11, numShards.getMaxNumShards());
    assertEquals(1, numShards.getShardCount());
}