Java Code Examples for org.apache.accumulo.core.client.Connector#createBatchWriter()
The following examples show how to use org.apache.accumulo.core.client.Connector#createBatchWriter().
The source project, file, and license are noted above each example.
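Before the project examples, here is a minimal, self-contained sketch of the typical pattern. It is illustrative only: the table name, the configuration values, and the helper method are assumptions, not taken from any project below.

import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.MutationsRejectedException;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.data.Mutation;

import java.util.concurrent.TimeUnit;

public class BatchWriterSketch {
    // Writes a single illustrative mutation to the given table.
    // "row1", "colFam", "colQual", and the config values are hypothetical.
    public static void writeOne(Connector connector, String tableName)
            throws TableNotFoundException, MutationsRejectedException {
        BatchWriterConfig config = new BatchWriterConfig()
                .setMaxMemory(10 * 1024 * 1024L)     // buffer up to 10 MB of mutations
                .setMaxLatency(30, TimeUnit.SECONDS) // flush buffered mutations at least every 30 seconds
                .setMaxWriteThreads(4);              // up to 4 threads writing to tablet servers
        try (BatchWriter writer = connector.createBatchWriter(tableName, config)) {
            Mutation m = new Mutation("row1");
            m.put("colFam", "colQual", "value");
            writer.addMutation(m);
        } // close() flushes any remaining buffered mutations
    }
}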
Example 1
Source File: AccumuloChangelogStore.java From accumulo-recipes with Apache License 2.0
public AccumuloChangelogStore(Connector connector, String tableName, StoreConfig config, BucketSize bucketSize)
        throws TableExistsException, AccumuloSecurityException, AccumuloException, TableNotFoundException {
    checkNotNull(connector);
    checkNotNull(tableName);
    checkNotNull(config);
    checkNotNull(bucketSize);
    this.connector = connector;
    this.tableName = tableName;
    this.config = config;
    this.bucketSize = bucketSize;

    if (!connector.tableOperations().exists(tableName)) {
        connector.tableOperations().create(tableName);
        configureTable(connector, tableName);
    }

    writer = connector.createBatchWriter(tableName, config.getMaxMemory(), config.getMaxLatency(), config.getMaxWriteThreads());
}
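Note: the four-argument overload used above (max memory in bytes, max latency in milliseconds, max write threads) is deprecated in Accumulo 1.5 and later in favor of the BatchWriterConfig variant seen in most of the examples below. A sketch of the equivalent call, assuming the StoreConfig latency value is in milliseconds:

writer = connector.createBatchWriter(tableName, new BatchWriterConfig()
        .setMaxMemory(config.getMaxMemory())
        .setMaxLatency(config.getMaxLatency(), TimeUnit.MILLISECONDS) // assumes milliseconds
        .setMaxWriteThreads(config.getMaxWriteThreads()));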
Example 2
Source File: EdgeKeyVersioningCache.java From datawave with Apache License 2.0
private String seedMetadataTable(Connector connector, long time, int keyVersionNum) throws TableNotFoundException, MutationsRejectedException {
    Value emptyVal = new Value();
    SimpleDateFormat dateFormat = new SimpleDateFormat(DateNormalizer.ISO_8601_FORMAT_STRING);
    String dateString = dateFormat.format(new Date(time));

    try (BatchWriter recordWriter = connector.createBatchWriter(metadataTableName, new BatchWriterConfig())) {
        String normalizedVersionNum = NumericalEncoder.encode(Integer.toString(keyVersionNum));
        String rowID = "edge_key";
        String columnFamily = "version";
        String columnQualifier = normalizedVersionNum + "/" + dateString;
        Mutation m = new Mutation(rowID);
        m.put(new Text(columnFamily), new Text(columnQualifier), emptyVal);
        recordWriter.addMutation(m);
    }
    return dateString;
}
Example 3
Source File: MixedGeoAndGeoWaveTest.java From datawave with Apache License 2.0
private static void writeKeyValues(Connector connector, Multimap<BulkIngestKey,Value> keyValues) throws Exception {
    final TableOperations tops = connector.tableOperations();
    final Set<BulkIngestKey> biKeys = keyValues.keySet();
    for (final BulkIngestKey biKey : biKeys) {
        final String tableName = biKey.getTableName().toString();
        if (!tops.exists(tableName))
            tops.create(tableName);

        final BatchWriter writer = connector.createBatchWriter(tableName, new BatchWriterConfig());
        for (final Value val : keyValues.get(biKey)) {
            final Mutation mutation = new Mutation(biKey.getKey().getRow());
            mutation.put(biKey.getKey().getColumnFamily(), biKey.getKey().getColumnQualifier(),
                            biKey.getKey().getColumnVisibilityParsed(), biKey.getKey().getTimestamp(), val);
            writer.addMutation(mutation);
        }
        writer.close();
    }
}
Example 4
Source File: MultiValueCompositeIndexTest.java From datawave with Apache License 2.0
private static void writeKeyValues(Connector connector, Multimap<BulkIngestKey,Value> keyValues) throws Exception {
    final TableOperations tops = connector.tableOperations();
    final Set<BulkIngestKey> biKeys = keyValues.keySet();
    for (final BulkIngestKey biKey : biKeys) {
        final String tableName = biKey.getTableName().toString();
        if (!tops.exists(tableName))
            tops.create(tableName);

        final BatchWriter writer = connector.createBatchWriter(tableName, new BatchWriterConfig());
        for (final Value val : keyValues.get(biKey)) {
            final Mutation mutation = new Mutation(biKey.getKey().getRow());
            mutation.put(biKey.getKey().getColumnFamily(), biKey.getKey().getColumnQualifier(),
                            biKey.getKey().getColumnVisibilityParsed(), biKey.getKey().getTimestamp(), val);
            writer.addMutation(mutation);
        }
        writer.close();
    }
}
Example 5
Source File: CompositeIndexTest.java From datawave with Apache License 2.0
private static void writeKeyValues(Connector connector, Multimap<BulkIngestKey,Value> keyValues) throws Exception {
    final TableOperations tops = connector.tableOperations();
    final Set<BulkIngestKey> biKeys = keyValues.keySet();
    for (final BulkIngestKey biKey : biKeys) {
        final String tableName = biKey.getTableName().toString();
        if (!tops.exists(tableName))
            tops.create(tableName);

        final BatchWriter writer = connector.createBatchWriter(tableName, new BatchWriterConfig());
        for (final Value val : keyValues.get(biKey)) {
            final Mutation mutation = new Mutation(biKey.getKey().getRow());
            mutation.put(biKey.getKey().getColumnFamily(), biKey.getKey().getColumnQualifier(),
                            biKey.getKey().getColumnVisibilityParsed(), biKey.getKey().getTimestamp(), val);
            writer.addMutation(mutation);
        }
        writer.close();
    }
}
Example 6
Source File: GeoSortedQueryDataTest.java From datawave with Apache License 2.0
private static void writeKeyValues(Connector connector, Multimap<BulkIngestKey,Value> keyValues) throws Exception {
    final TableOperations tops = connector.tableOperations();
    final Set<BulkIngestKey> biKeys = keyValues.keySet();
    for (final BulkIngestKey biKey : biKeys) {
        final String tableName = biKey.getTableName().toString();
        if (!tops.exists(tableName))
            tops.create(tableName);

        final BatchWriter writer = connector.createBatchWriter(tableName, new BatchWriterConfig());
        for (final Value val : keyValues.get(biKey)) {
            final Mutation mutation = new Mutation(biKey.getKey().getRow());
            mutation.put(biKey.getKey().getColumnFamily(), biKey.getKey().getColumnQualifier(),
                            biKey.getKey().getColumnVisibilityParsed(), val);
            writer.addMutation(mutation);
        }
        writer.close();
    }
}
Example 7
Source File: ExceededOrThresholdMarkerJexlNodeTest.java From datawave with Apache License 2.0
private static void writeKeyValues(Connector connector, Multimap<BulkIngestKey,Value> keyValues) throws Exception {
    final TableOperations tops = connector.tableOperations();
    final Set<BulkIngestKey> biKeys = keyValues.keySet();
    for (final BulkIngestKey biKey : biKeys) {
        final String tableName = biKey.getTableName().toString();
        if (!tops.exists(tableName))
            tops.create(tableName);

        final BatchWriter writer = connector.createBatchWriter(tableName, new BatchWriterConfig());
        for (final Value val : keyValues.get(biKey)) {
            final Mutation mutation = new Mutation(biKey.getKey().getRow());
            mutation.put(biKey.getKey().getColumnFamily(), biKey.getKey().getColumnQualifier(),
                            biKey.getKey().getColumnVisibilityParsed(), biKey.getKey().getTimestamp(), val);
            writer.addMutation(mutation);
        }
        writer.close();
    }
}
Example 8
Source File: ContentFunctionQueryTest.java From datawave with Apache License 2.0
private static void writeKeyValues(Connector connector, Multimap<BulkIngestKey,Value> keyValues) throws Exception {
    final TableOperations tops = connector.tableOperations();
    final Set<BulkIngestKey> biKeys = keyValues.keySet();
    tops.create(TableName.DATE_INDEX);
    for (final BulkIngestKey biKey : biKeys) {
        final String tableName = biKey.getTableName().toString();
        if (!tops.exists(tableName))
            tops.create(tableName);

        final BatchWriter writer = connector.createBatchWriter(tableName, new BatchWriterConfig());
        for (final Value val : keyValues.get(biKey)) {
            final Mutation mutation = new Mutation(biKey.getKey().getRow());
            mutation.put(biKey.getKey().getColumnFamily(), biKey.getKey().getColumnQualifier(),
                            biKey.getKey().getColumnVisibilityParsed(), biKey.getKey().getTimestamp(), val);
            writer.addMutation(mutation);
        }
        writer.close();
    }
}
Example 9
Source File: KeyValueIndex.java From accumulo-recipes with Apache License 2.0
public KeyValueIndex(Connector connector, String indexTable, ShardBuilder<T> shardBuilder, StoreConfig config, TypeRegistry<String> typeRegistry)
        throws TableNotFoundException, TableExistsException, AccumuloSecurityException, AccumuloException {
    this.shardBuilder = shardBuilder;
    this.typeRegistry = typeRegistry;
    this.indexTable = indexTable;
    this.connector = connector;
    this.config = config;

    if (!connector.tableOperations().exists(indexTable))
        connector.tableOperations().create(indexTable);

    writer = connector.createBatchWriter(indexTable, config.getMaxMemory(), config.getMaxLatency(), config.getMaxWriteThreads());
}
Example 10
Source File: ProspectorUtils.java From rya with Apache License 2.0
public static void writeMutations(final Connector connector, final String tableName, final Collection<Mutation> mutations)
        throws TableNotFoundException, MutationsRejectedException {
    final BatchWriter bw = connector.createBatchWriter(tableName, 10000L, 10000L, 4);
    for (final Mutation mutation : mutations) {
        bw.addMutation(mutation);
    }
    bw.flush();
    bw.close();
}
Example 11
Source File: QueriesTableAgeOffIteratorTest.java From datawave with Apache License 2.0
@Test
public void testAgeOffIterator() throws Exception {
    InMemoryInstance instance = new InMemoryInstance();
    Connector connector = instance.getConnector("root", new PasswordToken(""));

    connector.tableOperations().create(TABLE_NAME);
    IteratorSetting iteratorCfg = new IteratorSetting(19, "ageoff", QueriesTableAgeOffIterator.class);
    connector.tableOperations().attachIterator(TABLE_NAME, iteratorCfg, EnumSet.allOf(IteratorScope.class));

    long now = System.currentTimeMillis();
    // Write in a couple of keys with varying timestamps
    BatchWriter writer = connector.createBatchWriter(TABLE_NAME,
                    new BatchWriterConfig().setMaxLatency(30, TimeUnit.MILLISECONDS).setMaxMemory(1024L).setMaxWriteThreads(1));
    Mutation m1 = new Mutation("row1");
    m1.put("colf1", "colq1", now, "");
    writer.addMutation(m1);
    Mutation m2 = new Mutation("row2");
    m2.put("colf2", "colq2", (now + 100000), "");
    writer.addMutation(m2);
    writer.close();

    // Scan the entire table; we should only see keys whose timestamps are greater than or equal to now.
    // Mutation 1 should be expired by now, so we should only see Mutation 2.
    boolean sawRow2 = false;
    Scanner scanner = connector.createScanner(TABLE_NAME, new Authorizations());
    for (Entry<Key,Value> entry : scanner) {
        if (entry.getKey().getRow().toString().equals("row1"))
            Assert.fail("We saw row1 when it should be expired.");
        if (entry.getKey().getRow().toString().equals("row2"))
            sawRow2 = true;
    }
    if (!sawRow2)
        Assert.fail("We did not see row2 and we should have");
}
Example 12
Source File: ConfigUtils.java From rya with Apache License 2.0
public static BatchWriter createDefaultBatchWriter(final String tablename, final Configuration conf)
        throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
    final Long DEFAULT_MAX_MEMORY = getWriterMaxMemory(conf);
    final Long DEFAULT_MAX_LATENCY = getWriterMaxLatency(conf);
    final Integer DEFAULT_MAX_WRITE_THREADS = getWriterMaxWriteThreads(conf);
    final Connector connector = ConfigUtils.getConnector(conf);
    return connector.createBatchWriter(tablename, DEFAULT_MAX_MEMORY, DEFAULT_MAX_LATENCY, DEFAULT_MAX_WRITE_THREADS);
}
Example 13
Source File: UpgradeCounterValues.java From datawave with Apache License 2.0
protected void run(String[] args) throws ParseException, AccumuloSecurityException, AccumuloException, TableNotFoundException, IOException {
    parseConfig(args);

    ZooKeeperInstance instance = new ZooKeeperInstance(ClientConfiguration.loadDefault().withInstance(instanceName).withZkHosts(zookeepers));
    Connector connector = instance.getConnector(username, new PasswordToken(password));
    Authorizations auths = connector.securityOperations().getUserAuthorizations(connector.whoami());

    try (BatchWriter writer = connector.createBatchWriter(tableName,
                    new BatchWriterConfig().setMaxWriteThreads(bwThreads).setMaxMemory(bwMemory).setMaxLatency(60, TimeUnit.SECONDS));
                    BatchScanner scanner = connector.createBatchScanner(tableName, auths, bsThreads)) {
        scanner.setRanges(ranges);

        for (Entry<Key,Value> entry : scanner) {
            Key key = entry.getKey();
            ByteArrayDataInput in = ByteStreams.newDataInput(entry.getValue().get());
            Counters counters = new Counters();
            try {
                counters.readFields(in);
            } catch (IOException e) {
                // The IO exception means the counters are in the wrong format. We *assume* that they are in
                // the old (CDH3) format, and de-serialize according to that, and re-write the key with the new value.
                in = ByteStreams.newDataInput(entry.getValue().get());
                int numGroups = in.readInt();
                while (numGroups-- > 0) {
                    String groupName = Text.readString(in);
                    String groupDisplayName = Text.readString(in);
                    CounterGroup group = counters.addGroup(groupName, groupDisplayName);
                    int groupSize = WritableUtils.readVInt(in);
                    for (int i = 0; i < groupSize; i++) {
                        String counterName = Text.readString(in);
                        String counterDisplayName = counterName;
                        if (in.readBoolean())
                            counterDisplayName = Text.readString(in);
                        long value = WritableUtils.readVLong(in);
                        group.addCounter(counterName, counterDisplayName, value);
                    }
                }

                ByteArrayDataOutput out = ByteStreams.newDataOutput();
                counters.write(out);
                Mutation m = new Mutation(key.getRow());
                m.put(key.getColumnFamily(), key.getColumnQualifier(), key.getColumnVisibilityParsed(), key.getTimestamp() + 1,
                                new Value(out.toByteArray()));
                writer.addMutation(m);
            }
        }
    }
}
Example 14
Source File: AccumuloMutationProcessor.java From aliyun-maxcompute-data-collectors with Apache License 2.0
@Override
@SuppressWarnings("unchecked")
public void setConf(Configuration config) {
    this.conf = config;

    // Get the implementation of MutationTransformer to use.
    // By default, we call toString() on every non-null field.
    Class<? extends MutationTransformer> xformerClass =
            (Class<? extends MutationTransformer>) this.conf.getClass(
                    AccumuloConstants.TRANSFORMER_CLASS_KEY, ToStringMutationTransformer.class);
    this.mutationTransformer = (MutationTransformer) ReflectionUtils.newInstance(xformerClass, this.conf);
    if (null == mutationTransformer) {
        throw new RuntimeException("Could not instantiate MutationTransformer.");
    }

    String colFam = conf.get(AccumuloConstants.COL_FAMILY_KEY, null);
    if (null == colFam) {
        throw new RuntimeException("Accumulo column family not set.");
    }
    this.mutationTransformer.setColumnFamily(colFam);

    String rowKey = conf.get(AccumuloConstants.ROW_KEY_COLUMN_KEY, null);
    if (null == rowKey) {
        throw new RuntimeException("Row key column not set.");
    }
    this.mutationTransformer.setRowKeyColumn(rowKey);

    String vis = conf.get(AccumuloConstants.VISIBILITY_KEY, null);
    this.mutationTransformer.setVisibility(vis);

    this.tableName = conf.get(AccumuloConstants.TABLE_NAME_KEY, null);

    String zookeeper = conf.get(AccumuloConstants.ZOOKEEPERS);
    String instance = conf.get(AccumuloConstants.ACCUMULO_INSTANCE);
    Instance inst = new ZooKeeperInstance(instance, zookeeper);
    String username = conf.get(AccumuloConstants.ACCUMULO_USER_NAME);
    String pw = conf.get(AccumuloConstants.ACCUMULO_PASSWORD);
    if (null == pw) {
        pw = "";
    }
    byte[] password = pw.getBytes();

    BatchWriterConfig bwc = new BatchWriterConfig();
    long bs = conf.getLong(AccumuloConstants.BATCH_SIZE, AccumuloConstants.DEFAULT_BATCH_SIZE);
    bwc.setMaxMemory(bs);
    long la = conf.getLong(AccumuloConstants.MAX_LATENCY, AccumuloConstants.DEFAULT_LATENCY);
    bwc.setMaxLatency(la, TimeUnit.MILLISECONDS);

    try {
        Connector conn = inst.getConnector(username, new PasswordToken(password));
        this.table = conn.createBatchWriter(tableName, bwc);
    } catch (AccumuloException ex) {
        throw new RuntimeException("Error accessing Accumulo", ex);
    } catch (AccumuloSecurityException aex) {
        throw new RuntimeException("Security exception accessing Accumulo", aex);
    } catch (TableNotFoundException tex) {
        throw new RuntimeException("Accumulo table " + tableName + " not found", tex);
    }
}
Example 15
Source File: WebSocketClientIT.java From qonduit with Apache License 2.0
private void doScan(WebSocketClient client) throws Exception {
    long now = System.currentTimeMillis();
    String tableName = "qonduit.scanTest";
    Connector con = mac.getConnector(MAC_ROOT_USER, MAC_ROOT_PASSWORD);
    con.namespaceOperations().create("qonduit");
    con.tableOperations().create(tableName);
    BatchWriterConfig bwc = new BatchWriterConfig();
    bwc.setMaxLatency(2, TimeUnit.SECONDS);
    BatchWriter writer = con.createBatchWriter(tableName, bwc);

    ColumnVisibility cv = new ColumnVisibility();
    for (int i = 0; i < 10; i++) {
        Mutation m = new Mutation("m" + i);
        m.put("cf" + i, "cq" + i, cv, now + i, Integer.toString(i));
        writer.addMutation(m);
    }
    writer.flush();
    writer.close();
    sleepUninterruptibly(2, TimeUnit.SECONDS);

    List<byte[]> responses = new ArrayList<>();
    String id = UUID.randomUUID().toString();
    ScanRequest request = new ScanRequest();
    request.setRequestId(id);
    request.setTableName(tableName);
    request.setResultBatchSize(5);
    doIt(client, request, responses, 3);
    Assert.assertEquals(11, responses.size());

    for (byte[] b : responses) {
        KVPair kv = JsonSerializer.getObjectMapper().readValue(b, KVPair.class);
        Value val = kv.getValue();
        if (null != val) {
            int num = Integer.parseInt(new String(val.getValue()));
            Key key = kv.getKey().toKey();
            Assert.assertEquals("m" + num, key.getRow().toString());
            Assert.assertEquals("cf" + num, key.getColumnFamily().toString());
            Assert.assertEquals("cq" + num, key.getColumnQualifier().toString());
            Assert.assertEquals(now + num, key.getTimestamp());
            Assert.assertEquals(id, kv.getRequestId());
        } else {
            Assert.assertTrue(kv.isEndOfResults());
        }
    }
}
Example 16
Source File: MockLoader.java From datawave with Apache License 2.0
@Override
public InMemoryInstance call() throws Exception {
    InMemoryInstance instance = new InMemoryInstance(UUID.randomUUID() + key.table);
    Authorizations auths = key.connector.securityOperations().getUserAuthorizations(key.user);

    if (log.isTraceEnabled()) {
        log.trace("Building mock instances, with auths " + auths + " from " + key);
    }

    Connector instanceConnector = instance.getConnector(key.user, MOCK_PASSWORD);
    instanceConnector.securityOperations().changeUserAuthorizations(key.user, auths);

    if (instanceConnector.tableOperations().exists(key.table))
        instanceConnector.tableOperations().delete(key.table);
    instanceConnector.tableOperations().create(key.table);

    try (BatchScanner scanner = key.connector.createBatchScanner(key.table, auths, 11);
                    BatchWriter writer = instanceConnector.createBatchWriter(key.table, 100L * (1024L * 1024L), 100L, 1)) {
        scanner.setRanges(Lists.newArrayList(new Range()));
        Iterator<Entry<Key,Value>> iter = scanner.iterator();
        while (iter.hasNext()) {
            Entry<Key,Value> value = iter.next();
            Key valueKey = value.getKey();
            Mutation m = new Mutation(value.getKey().getRow());
            m.put(valueKey.getColumnFamily(), valueKey.getColumnQualifier(), new ColumnVisibility(valueKey.getColumnVisibility()),
                            valueKey.getTimestamp(), value.getValue());
            writer.addMutation(m);
        }
    }

    if (log.isTraceEnabled())
        log.trace("Built new instance " + instance.hashCode() + " now returning for use");
    return instance;
}
Example 17
Source File: NumShardsTest.java From datawave with Apache License 2.0
@Test
public void testUpdateCacheWithoutEntries() throws AccumuloException, AccumuloSecurityException, TableExistsException, TableNotFoundException, IOException, ParseException {
    // configure mock accumulo instance and populate with a couple of multiple numshards entries
    PasswordToken noPasswordToken = new PasswordToken();
    InMemoryInstance i = new InMemoryInstance("mock2");
    Connector connector = i.getConnector("root", noPasswordToken);

    Configuration conf = new Configuration();
    conf.set(AccumuloHelper.USERNAME, "root");
    conf.set(AccumuloHelper.INSTANCE_NAME, "mock2");
    conf.set(AccumuloHelper.PASSWORD, noPasswordToken.toString());
    conf.set(AccumuloHelper.ZOOKEEPERS, i.getZooKeepers());
    conf.set(ShardedDataTypeHandler.METADATA_TABLE_NAME, TableName.METADATA);

    connector.tableOperations().create(conf.get(ShardedDataTypeHandler.METADATA_TABLE_NAME));
    BatchWriter recordWriter = connector.createBatchWriter(conf.get(ShardedDataTypeHandler.METADATA_TABLE_NAME), new BatchWriterConfig());

    // write a couple of entries for multiple numshards
    Mutation m = new Mutation(NumShards.NUM_SHARDS);
    m.put(NumShards.NUM_SHARDS_CF + "blah", "20171102_19", "");
    recordWriter.addMutation(m);
    recordWriter.close();

    File multipleNumShardCache = File.createTempFile("numshards", ".txt");
    multipleNumShardCache.deleteOnExit();

    conf.set(ShardIdFactory.NUM_SHARDS, "11");
    conf.set(NumShards.ENABLE_MULTIPLE_NUMSHARDS, "true");
    conf.set(NumShards.MULTIPLE_NUMSHARDS_CACHE_PATH, multipleNumShardCache.getParent());

    AccumuloHelper mockedAccumuloHelper = EasyMock.createMock(AccumuloHelper.class);
    mockedAccumuloHelper.setup(conf);
    EasyMock.expectLastCall();
    EasyMock.expect(mockedAccumuloHelper.getConnector()).andReturn(connector);
    EasyMock.replay(mockedAccumuloHelper);

    NumShards numShards = new NumShards(conf);

    // these should create numshards.txt file based on multiple numshards entries in mock accumulo
    numShards.setaHelper(mockedAccumuloHelper);
    numShards.updateCache();

    assertEquals(11, numShards.getNumShards(0));
    assertEquals(11, numShards.getNumShards(Long.MAX_VALUE));
    assertEquals(11, numShards.getNumShards(""));
    assertEquals(11, numShards.getNumShards("20171102"));
    assertEquals(11, numShards.getMinNumShards());
    assertEquals(11, numShards.getMaxNumShards());
    assertEquals(1, numShards.getShardCount());
}
Example 18
Source File: Indexer.java From presto with Apache License 2.0
public Indexer(
        Connector connector,
        Authorizations auths,
        AccumuloTable table,
        BatchWriterConfig writerConfig)
        throws TableNotFoundException {
    this.connector = requireNonNull(connector, "connector is null");
    this.table = requireNonNull(table, "table is null");
    this.writerConfig = requireNonNull(writerConfig, "writerConfig is null");
    requireNonNull(auths, "auths is null");

    this.serializer = table.getSerializerInstance();

    // Create our batch writer
    indexWriter = connector.createBatchWriter(table.getIndexTableName(), writerConfig);

    ImmutableMultimap.Builder<ByteBuffer, ByteBuffer> indexColumnsBuilder = ImmutableMultimap.builder();
    Map<ByteBuffer, Map<ByteBuffer, Type>> indexColumnTypesBuilder = new HashMap<>();

    // Initialize metadata
    table.getColumns().forEach(columnHandle -> {
        if (columnHandle.isIndexed()) {
            // Wrap the column family and qualifier for this column and add it to
            // collection of indexed columns
            ByteBuffer family = wrap(columnHandle.getFamily().get().getBytes(UTF_8));
            ByteBuffer qualifier = wrap(columnHandle.getQualifier().get().getBytes(UTF_8));
            indexColumnsBuilder.put(family, qualifier);

            // Create a mapping for this column's Presto type, again creating a new one for the
            // family if necessary
            Map<ByteBuffer, Type> types = indexColumnTypesBuilder.get(family);
            if (types == null) {
                types = new HashMap<>();
                indexColumnTypesBuilder.put(family, types);
            }
            types.put(qualifier, columnHandle.getType());
        }
    });

    indexColumns = indexColumnsBuilder.build();
    indexColumnTypes = ImmutableMap.copyOf(indexColumnTypesBuilder);

    // If there are no indexed columns, throw an exception
    if (indexColumns.isEmpty()) {
        throw new PrestoException(NOT_SUPPORTED, "No indexed columns in table metadata. Refusing to index a table with no indexed columns");
    }

    // Initialize metrics map
    // This metrics map is for column cardinality
    metrics.put(METRICS_TABLE_ROW_COUNT, new AtomicLong(0));

    // Scan the metrics table for existing first row and last row
    Pair<byte[], byte[]> minmax = getMinMaxRowIds(connector, table, auths);
    firstRow = minmax.getLeft();
    lastRow = minmax.getRight();
}
Example 19
Source File: AccumuloGeoSpatialStore.java From accumulo-recipes with Apache License 2.0
public AccumuloGeoSpatialStore(Connector connector, String tableName, StoreConfig config, double maxPrecision, int numPartitions)
        throws TableExistsException, AccumuloSecurityException, AccumuloException, TableNotFoundException {
    this.connector = connector;
    this.config = config;
    this.tableName = tableName;
    this.maxPrecision = maxPrecision;
    this.numPartitions = numPartitions;

    if (!connector.tableOperations().exists(tableName))
        connector.tableOperations().create(tableName);

    writer = connector.createBatchWriter(tableName, config.getMaxMemory(), config.getMaxLatency(), config.getMaxWriteThreads());
}