Java Code Examples for org.apache.accumulo.core.client.BatchWriterConfig#setMaxLatency()
The following examples show how to use org.apache.accumulo.core.client.BatchWriterConfig#setMaxLatency().
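Before the project-specific examples, here is a minimal, self-contained sketch of the pattern they all share: build a BatchWriterConfig, cap the flush latency with setMaxLatency(), and pass the config when creating a BatchWriter. The table name ("example_table"), the Connector argument, and the chosen buffer sizes are placeholder assumptions for illustration, not taken from any of the projects below.

import java.util.concurrent.TimeUnit;

import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.data.Mutation;

public class MaxLatencyExample {

    public void writeWithLatencyCap(Connector connector) throws Exception {
        BatchWriterConfig config = new BatchWriterConfig();
        // Flush buffered mutations at least every 10 seconds,
        // even if the memory buffer is not yet full.
        config.setMaxLatency(10, TimeUnit.SECONDS);
        config.setMaxMemory(10 * 1000 * 1000); // 10 MB write buffer
        config.setMaxWriteThreads(4);

        // "example_table" is a placeholder table name.
        BatchWriter writer = connector.createBatchWriter("example_table", config);
        Mutation m = new Mutation("row1");
        m.put("cf", "cq", "value");
        writer.addMutation(m);
        writer.close(); // close() flushes any mutations still buffered
    }
}

A lower latency makes written data visible to scans sooner at the cost of more frequent, smaller flushes; a value of zero means no time-based maximum, so flushes happen only when the memory buffer fills or flush()/close() is called.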
Example 1
Source File: RdfCloudTripleStoreSelectivityEvaluationStatisticsTest.java From rya with Apache License 2.0
@Before
public void init() throws AccumuloException, AccumuloSecurityException, TableNotFoundException, TableExistsException {
    mock = new MockInstance("accumulo");
    PasswordToken pToken = new PasswordToken("pass".getBytes());
    conn = mock.getConnector("user", pToken);

    config = new BatchWriterConfig();
    config.setMaxMemory(1000);
    config.setMaxLatency(1000, TimeUnit.SECONDS);
    config.setMaxWriteThreads(10);

    if (conn.tableOperations().exists("rya_prospects")) {
        conn.tableOperations().delete("rya_prospects");
    }
    if (conn.tableOperations().exists("rya_selectivity")) {
        conn.tableOperations().delete("rya_selectivity");
    }

    arc = new AccumuloRdfConfiguration();
    arc.setTableLayoutStrategy(new TablePrefixLayoutStrategy());
    arc.setMaxRangesForScanner(300);
}
Example 2
Source File: QueryJoinSelectOptimizerTest.java From rya with Apache License 2.0
@Before
public void init() throws AccumuloException, AccumuloSecurityException, TableNotFoundException, TableExistsException {
    mock = new MockInstance("accumulo");
    PasswordToken pToken = new PasswordToken("pass".getBytes());
    conn = mock.getConnector("user", pToken);

    config = new BatchWriterConfig();
    config.setMaxMemory(1000);
    config.setMaxLatency(1000, TimeUnit.SECONDS);
    config.setMaxWriteThreads(10);

    if (conn.tableOperations().exists("rya_prospects")) {
        conn.tableOperations().delete("rya_prospects");
    }
    if (conn.tableOperations().exists("rya_selectivity")) {
        conn.tableOperations().delete("rya_selectivity");
    }

    arc = new AccumuloRdfConfiguration();
    arc.setTableLayoutStrategy(new TablePrefixLayoutStrategy());
    arc.setMaxRangesForScanner(300);

    res = new ProspectorServiceEvalStatsDAO(conn, arc);
}
Example 3
Source File: AccumuloStorage.java From rya with Apache License 2.0
@Override
public void setStoreLocation(final String location, final Job job) throws IOException {
    conf = job.getConfiguration();
    setLocationFromUri(location, job);

    if (!conf.getBoolean(AccumuloOutputFormat.class.getSimpleName() + ".configured", false)) {
        try {
            AccumuloOutputFormat.setConnectorInfo(job, user, new PasswordToken(userP.getBytes(StandardCharsets.UTF_8)));
        } catch (final AccumuloSecurityException e) {
            throw new RuntimeException(e);
        }
        AccumuloOutputFormat.setDefaultTableName(job, table);
        AccumuloOutputFormat.setZooKeeperInstance(job, inst, zookeepers);

        final BatchWriterConfig config = new BatchWriterConfig();
        config.setMaxLatency(10, TimeUnit.SECONDS);
        config.setMaxMemory(10 * 1000 * 1000);
        config.setMaxWriteThreads(10);
        AccumuloOutputFormat.setBatchWriterOptions(job, config);
    }
}
Example 4
Source File: AccumuloSelectivityEvalDAOTest.java From rya with Apache License 2.0
@Before
public void init() throws AccumuloException, AccumuloSecurityException, TableNotFoundException, TableExistsException {
    mock = new MockInstance("accumulo");
    PasswordToken pToken = new PasswordToken("pass".getBytes());
    conn = mock.getConnector("user", pToken);

    config = new BatchWriterConfig();
    config.setMaxMemory(1000);
    config.setMaxLatency(1000, TimeUnit.SECONDS);
    config.setMaxWriteThreads(10);

    if (conn.tableOperations().exists("rya_prospects")) {
        conn.tableOperations().delete("rya_prospects");
    }
    if (conn.tableOperations().exists("rya_selectivity")) {
        conn.tableOperations().delete("rya_selectivity");
    }

    arc = new AccumuloRdfConfiguration();
    res = new ProspectorServiceEvalStatsDAO(conn, arc);
    arc.setTableLayoutStrategy(new TablePrefixLayoutStrategy());
    arc.setMaxRangesForScanner(300);
}
Example 5
Source File: TestAbstractAccumuloStorage.java From spork with Apache License 2.0
public Job getExpectedStoreJob(String inst, String zookeepers, String user, String password,
        String table, long maxWriteBufferSize, int writeThreads, int maxWriteLatencyMS) throws IOException {

    Job expected = new Job(new Configuration());

    try {
        AccumuloOutputFormat.setConnectorInfo(expected, user, new PasswordToken(password));
    } catch (AccumuloSecurityException e) {
        Assert.fail(e.getMessage());
    }

    AccumuloOutputFormat.setZooKeeperInstance(expected, inst, zookeepers);
    AccumuloOutputFormat.setCreateTables(expected, true);

    BatchWriterConfig bwConfig = new BatchWriterConfig();
    bwConfig.setMaxLatency(maxWriteLatencyMS, TimeUnit.MILLISECONDS);
    bwConfig.setMaxMemory(maxWriteBufferSize);
    bwConfig.setMaxWriteThreads(writeThreads);
    AccumuloOutputFormat.setBatchWriterOptions(expected, bwConfig);

    return expected;
}
Example 6
Source File: BatchWriterOpts.java From accumulo-examples with Apache License 2.0
public BatchWriterConfig getBatchWriterConfig() {
    BatchWriterConfig config = new BatchWriterConfig();
    config.setMaxWriteThreads(this.batchThreads);
    config.setMaxLatency(this.batchLatency, TimeUnit.MILLISECONDS);
    config.setMaxMemory(this.batchMemory);
    config.setTimeout(this.batchTimeout, TimeUnit.MILLISECONDS);
    return config;
}
Example 7
Source File: AccumuloGraphConfiguration.java From vertexium with Apache License 2.0
public BatchWriterConfig createBatchWriterConfig() {
    long maxMemory = getConfigLong(BATCHWRITER_MAX_MEMORY, DEFAULT_BATCHWRITER_MAX_MEMORY);
    long maxLatency = getConfigLong(BATCHWRITER_MAX_LATENCY, DEFAULT_BATCHWRITER_MAX_LATENCY);
    int maxWriteThreads = getInt(BATCHWRITER_MAX_WRITE_THREADS, DEFAULT_BATCHWRITER_MAX_WRITE_THREADS);
    long timeout = getConfigLong(BATCHWRITER_TIMEOUT, DEFAULT_BATCHWRITER_TIMEOUT);

    BatchWriterConfig config = new BatchWriterConfig();
    config.setMaxMemory(maxMemory);
    config.setMaxLatency(maxLatency, TimeUnit.MILLISECONDS);
    config.setMaxWriteThreads(maxWriteThreads);
    config.setTimeout(timeout, TimeUnit.MILLISECONDS);
    return config;
}
Example 8
Source File: AbstractAccumuloStorage.java From spork with Apache License 2.0
public void setStoreLocation(String location, Job job) throws IOException {
    setLocationFromUri(location);

    loadDependentJars(job.getConfiguration());

    Map<String, String> entries = getOutputFormatEntries(job.getConfiguration());
    unsetEntriesFromConfiguration(job.getConfiguration(), entries);

    try {
        AccumuloOutputFormat.setConnectorInfo(job, user, new PasswordToken(password));
    } catch (AccumuloSecurityException e) {
        throw new IOException(e);
    }

    AccumuloOutputFormat.setCreateTables(job, true);
    AccumuloOutputFormat.setZooKeeperInstance(job, inst, zookeepers);

    BatchWriterConfig bwConfig = new BatchWriterConfig();
    bwConfig.setMaxLatency(maxLatency, TimeUnit.MILLISECONDS);
    bwConfig.setMaxMemory(maxMutationBufferSize);
    bwConfig.setMaxWriteThreads(maxWriteThreads);
    AccumuloOutputFormat.setBatchWriterOptions(job, bwConfig);

    log.info("Writing data to " + table);

    configureOutputFormat(job);
}
Example 9
Source File: AccumuloOperations.java From geowave with Apache License 2.0
public BatchWriter createBatchWriter(final String tableName) throws TableNotFoundException {
    final String qName = getQualifiedTableName(tableName);
    final BatchWriterConfig config = new BatchWriterConfig();
    config.setMaxMemory(byteBufferSize);
    config.setMaxLatency(timeoutMillis, TimeUnit.MILLISECONDS);
    config.setMaxWriteThreads(numThreads);
    return connector.createBatchWriter(qName, config);
}
Example 10
Source File: PutAccumuloRecord.java From nifi with Apache License 2.0
@OnScheduled
public void onScheduled(final ProcessContext context) {
    accumuloConnectorService = context.getProperty(ACCUMULO_CONNECTOR_SERVICE).asControllerService(BaseAccumuloService.class);
    final Double maxBytes = context.getProperty(MEMORY_SIZE).asDataSize(DataUnit.B);
    this.client = accumuloConnectorService.getClient();

    BatchWriterConfig writerConfig = new BatchWriterConfig();
    writerConfig.setMaxWriteThreads(context.getProperty(THREADS).asInteger());
    writerConfig.setMaxMemory(maxBytes.longValue());
    tableWriter = client.createMultiTableBatchWriter(writerConfig);

    flushOnEveryFlow = context.getProperty(FLUSH_ON_FLOWFILE).asBoolean();
    if (!flushOnEveryFlow) {
        writerConfig.setMaxLatency(60, TimeUnit.SECONDS);
    }

    if (context.getProperty(CREATE_TABLE).asBoolean() && !context.getProperty(TABLE_NAME).isExpressionLanguagePresent()) {
        final Map<String, String> flowAttributes = new HashMap<>();
        final String table = context.getProperty(TABLE_NAME).evaluateAttributeExpressions(flowAttributes).getValue();
        final TableOperations tableOps = this.client.tableOperations();
        if (!tableOps.exists(table)) {
            getLogger().info("Creating " + table + " table.");
            try {
                tableOps.create(table);
            } catch (TableExistsException te) {
                // can safely ignore
            } catch (AccumuloSecurityException | AccumuloException e) {
                getLogger().info("Accumulo or Security error creating. Continuing... " + table + ". ", e);
            }
        }
    }
}
Example 11
Source File: WebSocketClientIT.java From qonduit with Apache License 2.0
private void doScan(WebSocketClient client) throws Exception {
    long now = System.currentTimeMillis();
    String tableName = "qonduit.scanTest";
    Connector con = mac.getConnector(MAC_ROOT_USER, MAC_ROOT_PASSWORD);
    con.namespaceOperations().create("qonduit");
    con.tableOperations().create(tableName);

    BatchWriterConfig bwc = new BatchWriterConfig();
    bwc.setMaxLatency(2, TimeUnit.SECONDS);
    BatchWriter writer = con.createBatchWriter(tableName, bwc);

    ColumnVisibility cv = new ColumnVisibility();
    for (int i = 0; i < 10; i++) {
        Mutation m = new Mutation("m" + i);
        m.put("cf" + i, "cq" + i, cv, now + i, Integer.toString(i));
        writer.addMutation(m);
    }
    writer.flush();
    writer.close();
    sleepUninterruptibly(2, TimeUnit.SECONDS);

    List<byte[]> responses = new ArrayList<>();
    String id = UUID.randomUUID().toString();
    ScanRequest request = new ScanRequest();
    request.setRequestId(id);
    request.setTableName(tableName);
    request.setResultBatchSize(5);
    doIt(client, request, responses, 3);

    Assert.assertEquals(11, responses.size());
    for (byte[] b : responses) {
        KVPair kv = JsonSerializer.getObjectMapper().readValue(b, KVPair.class);
        Value val = kv.getValue();
        if (null != val) {
            int num = Integer.parseInt(new String(val.getValue()));
            Key key = kv.getKey().toKey();
            Assert.assertEquals("m" + num, key.getRow().toString());
            Assert.assertEquals("cf" + num, key.getColumnFamily().toString());
            Assert.assertEquals("cq" + num, key.getColumnQualifier().toString());
            Assert.assertEquals(now + num, key.getTimestamp());
            Assert.assertEquals(id, kv.getRequestId());
        } else {
            Assert.assertTrue(kv.isEndOfResults());
        }
    }
}
Example 12
Source File: AccumuloMutationProcessor.java From aliyun-maxcompute-data-collectors with Apache License 2.0
@Override
@SuppressWarnings("unchecked")
public void setConf(Configuration config) {
    this.conf = config;

    // Get the implementation of MutationTransformer to use.
    // By default, we call toString() on every non-null field.
    Class<? extends MutationTransformer> xformerClass =
        (Class<? extends MutationTransformer>) this.conf.getClass(
            AccumuloConstants.TRANSFORMER_CLASS_KEY, ToStringMutationTransformer.class);
    this.mutationTransformer = (MutationTransformer) ReflectionUtils.newInstance(xformerClass, this.conf);
    if (null == mutationTransformer) {
        throw new RuntimeException("Could not instantiate MutationTransformer.");
    }

    String colFam = conf.get(AccumuloConstants.COL_FAMILY_KEY, null);
    if (null == colFam) {
        throw new RuntimeException("Accumulo column family not set.");
    }
    this.mutationTransformer.setColumnFamily(colFam);

    String rowKey = conf.get(AccumuloConstants.ROW_KEY_COLUMN_KEY, null);
    if (null == rowKey) {
        throw new RuntimeException("Row key column not set.");
    }
    this.mutationTransformer.setRowKeyColumn(rowKey);

    String vis = conf.get(AccumuloConstants.VISIBILITY_KEY, null);
    this.mutationTransformer.setVisibility(vis);

    this.tableName = conf.get(AccumuloConstants.TABLE_NAME_KEY, null);

    String zookeeper = conf.get(AccumuloConstants.ZOOKEEPERS);
    String instance = conf.get(AccumuloConstants.ACCUMULO_INSTANCE);
    Instance inst = new ZooKeeperInstance(instance, zookeeper);
    String username = conf.get(AccumuloConstants.ACCUMULO_USER_NAME);
    String pw = conf.get(AccumuloConstants.ACCUMULO_PASSWORD);
    if (null == pw) {
        pw = "";
    }
    byte[] password = pw.getBytes();

    BatchWriterConfig bwc = new BatchWriterConfig();

    long bs = conf.getLong(AccumuloConstants.BATCH_SIZE, AccumuloConstants.DEFAULT_BATCH_SIZE);
    bwc.setMaxMemory(bs);

    long la = conf.getLong(AccumuloConstants.MAX_LATENCY, AccumuloConstants.DEFAULT_LATENCY);
    bwc.setMaxLatency(la, TimeUnit.MILLISECONDS);

    try {
        Connector conn = inst.getConnector(username, new PasswordToken(password));
        this.table = conn.createBatchWriter(tableName, bwc);
    } catch (AccumuloException ex) {
        throw new RuntimeException("Error accessing Accumulo", ex);
    } catch (AccumuloSecurityException aex) {
        throw new RuntimeException("Security exception accessing Accumulo", aex);
    } catch (TableNotFoundException tex) {
        throw new RuntimeException("Accumulo table " + tableName + " not found", tex);
    }
}