Java Code Examples for org.apache.accumulo.core.client.BatchWriterConfig#setMaxWriteThreads()
The following examples show how to use org.apache.accumulo.core.client.BatchWriterConfig#setMaxWriteThreads().
Each example notes the project it comes from, the source file, and the project's license.
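Before the project examples, here is a minimal, self-contained sketch of the pattern they all share: build a BatchWriterConfig, set the write-thread count alongside the other buffering knobs, then hand the config to a writer factory. This sketch is not taken from any of the projects below; the Connector, the table name ("my_table"), and the cell values are placeholder assumptions.

    import java.nio.charset.StandardCharsets;
    import java.util.concurrent.TimeUnit;

    import org.apache.accumulo.core.client.BatchWriter;
    import org.apache.accumulo.core.client.BatchWriterConfig;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.MutationsRejectedException;
    import org.apache.accumulo.core.client.TableNotFoundException;
    import org.apache.accumulo.core.data.Mutation;
    import org.apache.accumulo.core.data.Value;

    public class MaxWriteThreadsSketch {
        // "conn" must be a live Connector and "my_table" an existing table.
        static void writeWithTenThreads(Connector conn)
                throws TableNotFoundException, MutationsRejectedException {
            BatchWriterConfig config = new BatchWriterConfig();
            config.setMaxMemory(10 * 1024 * 1024);      // buffer up to 10 MB of mutations
            config.setMaxLatency(30, TimeUnit.SECONDS); // flush buffered mutations at least every 30 s
            config.setMaxWriteThreads(10);              // send to tablet servers with up to 10 threads

            BatchWriter writer = conn.createBatchWriter("my_table", config);
            try {
                Mutation m = new Mutation("row1");
                m.put("cf", "cq", new Value("value".getBytes(StandardCharsets.UTF_8)));
                writer.addMutation(m);
            } finally {
                writer.close(); // close() flushes anything still buffered
            }
        }
    }

setMaxWriteThreads bounds how many threads the client uses to push buffered mutations to tablet servers in parallel, while the memory and latency settings control how much is buffered and for how long before a flush.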
Example 1
Source File: RdfCloudTripleStoreSelectivityEvaluationStatisticsTest.java (from rya, Apache License 2.0)
@Before
public void init() throws AccumuloException, AccumuloSecurityException,
        TableNotFoundException, TableExistsException {
    mock = new MockInstance("accumulo");
    PasswordToken pToken = new PasswordToken("pass".getBytes());
    conn = mock.getConnector("user", pToken);

    config = new BatchWriterConfig();
    config.setMaxMemory(1000);
    config.setMaxLatency(1000, TimeUnit.SECONDS);
    config.setMaxWriteThreads(10);

    if (conn.tableOperations().exists("rya_prospects")) {
        conn.tableOperations().delete("rya_prospects");
    }
    if (conn.tableOperations().exists("rya_selectivity")) {
        conn.tableOperations().delete("rya_selectivity");
    }

    arc = new AccumuloRdfConfiguration();
    arc.setTableLayoutStrategy(new TablePrefixLayoutStrategy());
    arc.setMaxRangesForScanner(300);
}
Example 2
Source File: QueryJoinSelectOptimizerTest.java (from rya, Apache License 2.0)
@Before
public void init() throws AccumuloException, AccumuloSecurityException,
        TableNotFoundException, TableExistsException {
    mock = new MockInstance("accumulo");
    PasswordToken pToken = new PasswordToken("pass".getBytes());
    conn = mock.getConnector("user", pToken);

    config = new BatchWriterConfig();
    config.setMaxMemory(1000);
    config.setMaxLatency(1000, TimeUnit.SECONDS);
    config.setMaxWriteThreads(10);

    if (conn.tableOperations().exists("rya_prospects")) {
        conn.tableOperations().delete("rya_prospects");
    }
    if (conn.tableOperations().exists("rya_selectivity")) {
        conn.tableOperations().delete("rya_selectivity");
    }

    arc = new AccumuloRdfConfiguration();
    arc.setTableLayoutStrategy(new TablePrefixLayoutStrategy());
    arc.setMaxRangesForScanner(300);

    res = new ProspectorServiceEvalStatsDAO(conn, arc);
}
Example 3
Source File: AccumuloStorage.java (from rya, Apache License 2.0)
@Override
public void setStoreLocation(final String location, final Job job) throws IOException {
    conf = job.getConfiguration();
    setLocationFromUri(location, job);

    if (!conf.getBoolean(AccumuloOutputFormat.class.getSimpleName() + ".configured", false)) {
        try {
            AccumuloOutputFormat.setConnectorInfo(job, user,
                    new PasswordToken(userP.getBytes(StandardCharsets.UTF_8)));
        } catch (final AccumuloSecurityException e) {
            throw new RuntimeException(e);
        }
        AccumuloOutputFormat.setDefaultTableName(job, table);
        AccumuloOutputFormat.setZooKeeperInstance(job, inst, zookeepers);

        final BatchWriterConfig config = new BatchWriterConfig();
        config.setMaxLatency(10, TimeUnit.SECONDS);
        config.setMaxMemory(10 * 1000 * 1000);
        config.setMaxWriteThreads(10);
        AccumuloOutputFormat.setBatchWriterOptions(job, config);
    }
}
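In this example (and in Examples 5 and 8 below) the BatchWriterConfig never creates a writer directly: AccumuloOutputFormat.setBatchWriterOptions serializes it into the Hadoop job configuration, and each task's record writer reconstructs it when opening its own writer on the worker side.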
Example 4
Source File: AccumuloSelectivityEvalDAOTest.java (from rya, Apache License 2.0)
@Before
public void init() throws AccumuloException, AccumuloSecurityException,
        TableNotFoundException, TableExistsException {
    mock = new MockInstance("accumulo");
    PasswordToken pToken = new PasswordToken("pass".getBytes());
    conn = mock.getConnector("user", pToken);

    config = new BatchWriterConfig();
    config.setMaxMemory(1000);
    config.setMaxLatency(1000, TimeUnit.SECONDS);
    config.setMaxWriteThreads(10);

    if (conn.tableOperations().exists("rya_prospects")) {
        conn.tableOperations().delete("rya_prospects");
    }
    if (conn.tableOperations().exists("rya_selectivity")) {
        conn.tableOperations().delete("rya_selectivity");
    }

    arc = new AccumuloRdfConfiguration();
    res = new ProspectorServiceEvalStatsDAO(conn, arc);
    arc.setTableLayoutStrategy(new TablePrefixLayoutStrategy());
    arc.setMaxRangesForScanner(300);
}
Example 5
Source File: TestAbstractAccumuloStorage.java (from spork, Apache License 2.0)
public Job getExpectedStoreJob(String inst, String zookeepers, String user,
        String password, String table, long maxWriteBufferSize, int writeThreads,
        int maxWriteLatencyMS) throws IOException {

    Job expected = new Job(new Configuration());

    try {
        AccumuloOutputFormat.setConnectorInfo(expected, user, new PasswordToken(password));
    } catch (AccumuloSecurityException e) {
        Assert.fail(e.getMessage());
    }

    AccumuloOutputFormat.setZooKeeperInstance(expected, inst, zookeepers);
    AccumuloOutputFormat.setCreateTables(expected, true);

    BatchWriterConfig bwConfig = new BatchWriterConfig();
    bwConfig.setMaxLatency(maxWriteLatencyMS, TimeUnit.MILLISECONDS);
    bwConfig.setMaxMemory(maxWriteBufferSize);
    bwConfig.setMaxWriteThreads(writeThreads);
    AccumuloOutputFormat.setBatchWriterOptions(expected, bwConfig);

    return expected;
}
Example 6
Source File: BatchWriterOpts.java (from accumulo-examples, Apache License 2.0)
public BatchWriterConfig getBatchWriterConfig() {
    BatchWriterConfig config = new BatchWriterConfig();
    config.setMaxWriteThreads(this.batchThreads);
    config.setMaxLatency(this.batchLatency, TimeUnit.MILLISECONDS);
    config.setMaxMemory(this.batchMemory);
    config.setTimeout(this.batchTimeout, TimeUnit.MILLISECONDS);
    return config;
}
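This example touches all four knobs BatchWriterConfig exposes. Any setter left uncalled falls back to the client's documented defaults (in the 1.x line: 50 MB maximum memory, two minutes maximum latency, three write threads, no timeout), so calling setMaxWriteThreads is the usual way to raise the conservative default write parallelism.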
Example 7
Source File: AccumuloGraphConfiguration.java (from vertexium, Apache License 2.0)
public BatchWriterConfig createBatchWriterConfig() {
    long maxMemory = getConfigLong(BATCHWRITER_MAX_MEMORY, DEFAULT_BATCHWRITER_MAX_MEMORY);
    long maxLatency = getConfigLong(BATCHWRITER_MAX_LATENCY, DEFAULT_BATCHWRITER_MAX_LATENCY);
    int maxWriteThreads = getInt(BATCHWRITER_MAX_WRITE_THREADS, DEFAULT_BATCHWRITER_MAX_WRITE_THREADS);
    long timeout = getConfigLong(BATCHWRITER_TIMEOUT, DEFAULT_BATCHWRITER_TIMEOUT);

    BatchWriterConfig config = new BatchWriterConfig();
    config.setMaxMemory(maxMemory);
    config.setMaxLatency(maxLatency, TimeUnit.MILLISECONDS);
    config.setMaxWriteThreads(maxWriteThreads);
    config.setTimeout(timeout, TimeUnit.MILLISECONDS);
    return config;
}
Example 8
Source File: AbstractAccumuloStorage.java (from spork, Apache License 2.0)
public void setStoreLocation(String location, Job job) throws IOException {
    setLocationFromUri(location);

    loadDependentJars(job.getConfiguration());

    Map<String, String> entries = getOutputFormatEntries(job.getConfiguration());
    unsetEntriesFromConfiguration(job.getConfiguration(), entries);

    try {
        AccumuloOutputFormat.setConnectorInfo(job, user, new PasswordToken(password));
    } catch (AccumuloSecurityException e) {
        throw new IOException(e);
    }

    AccumuloOutputFormat.setCreateTables(job, true);
    AccumuloOutputFormat.setZooKeeperInstance(job, inst, zookeepers);

    BatchWriterConfig bwConfig = new BatchWriterConfig();
    bwConfig.setMaxLatency(maxLatency, TimeUnit.MILLISECONDS);
    bwConfig.setMaxMemory(maxMutationBufferSize);
    bwConfig.setMaxWriteThreads(maxWriteThreads);
    AccumuloOutputFormat.setBatchWriterOptions(job, bwConfig);

    log.info("Writing data to " + table);

    configureOutputFormat(job);
}
Example 9
Source File: AccumuloOperations.java (from geowave, Apache License 2.0)
public BatchWriter createBatchWriter(final String tableName) throws TableNotFoundException {
    final String qName = getQualifiedTableName(tableName);
    final BatchWriterConfig config = new BatchWriterConfig();
    config.setMaxMemory(byteBufferSize);
    config.setMaxLatency(timeoutMillis, TimeUnit.MILLISECONDS);
    config.setMaxWriteThreads(numThreads);
    return connector.createBatchWriter(qName, config);
}
Example 10
Source File: PutAccumuloRecord.java (from nifi, Apache License 2.0)
@OnScheduled
public void onScheduled(final ProcessContext context) {
    accumuloConnectorService = context.getProperty(ACCUMULO_CONNECTOR_SERVICE)
            .asControllerService(BaseAccumuloService.class);

    final Double maxBytes = context.getProperty(MEMORY_SIZE).asDataSize(DataUnit.B);
    this.client = accumuloConnectorService.getClient();

    BatchWriterConfig writerConfig = new BatchWriterConfig();
    writerConfig.setMaxWriteThreads(context.getProperty(THREADS).asInteger());
    writerConfig.setMaxMemory(maxBytes.longValue());
    tableWriter = client.createMultiTableBatchWriter(writerConfig);
    flushOnEveryFlow = context.getProperty(FLUSH_ON_FLOWFILE).asBoolean();
    if (!flushOnEveryFlow) {
        writerConfig.setMaxLatency(60, TimeUnit.SECONDS);
    }

    if (context.getProperty(CREATE_TABLE).asBoolean()
            && !context.getProperty(TABLE_NAME).isExpressionLanguagePresent()) {
        final Map<String, String> flowAttributes = new HashMap<>();
        final String table = context.getProperty(TABLE_NAME)
                .evaluateAttributeExpressions(flowAttributes).getValue();
        final TableOperations tableOps = this.client.tableOperations();
        if (!tableOps.exists(table)) {
            getLogger().info("Creating " + table + " table.");
            try {
                tableOps.create(table);
            } catch (TableExistsException te) {
                // can safely ignore
            } catch (AccumuloSecurityException | AccumuloException e) {
                getLogger().info("Accumulo or Security error creating. Continuing... " + table + ". ", e);
            }
        }
    }
}
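Note that this example sets the maximum latency only after createMultiTableBatchWriter has been called. In the stock Accumulo client, the BatchWriterConfig values are copied when the writer is constructed, so a latency configured after creation may not take effect.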
Example 11
Source File: RyaOutputFormat.java (from rya, Apache License 2.0)
/**
 * Constructor.
 * @param conf Configuration containing any relevant options.
 * @throws IOException if the core Rya indexer or entity indexer can't
 *         be initialized
 */
public RyaRecordWriter(final Configuration conf) throws IOException {
    // set the visibility
    final String visibility = conf.get(CV_PROPERTY);
    if (visibility != null) {
        cv = visibility.getBytes(StandardCharsets.UTF_8);
    }

    // set the default context
    final String context = conf.get(CONTEXT_PROPERTY, "");
    if (context != null && !context.isEmpty()) {
        defaultContext = new RyaIRI(context);
    }

    // set up the buffer
    bufferSizeLimit = conf.getLong(MAX_MUTATION_BUFFER_SIZE, ONE_MEGABYTE);
    final int bufferCapacity = (int) (bufferSizeLimit / AVE_STATEMENT_SIZE);
    buffer = new ArrayList<RyaStatement>(bufferCapacity);

    // set up the indexers
    freeTextIndexer = getFreeTextIndexer(conf);
    temporalIndexer = getTemporalIndexer(conf);
    entityIndexer = getEntityIndexer(conf);
    ryaIndexer = getRyaIndexer(conf);

    // The entity index needs a batch writer -- typically it uses the DAO's, but decoupling
    // them lets it be used with or without the core tables, like the other indexers.
    if (entityIndexer != null) {
        Connector conn;
        try {
            conn = ConfigUtils.getConnector(conf);
        } catch (AccumuloException | AccumuloSecurityException e) {
            throw new IOException("Error connecting to Accumulo for entity index output", e);
        }
        final BatchWriterConfig batchWriterConfig = new BatchWriterConfig();
        batchWriterConfig.setMaxMemory(RdfCloudTripleStoreConstants.MAX_MEMORY);
        batchWriterConfig.setTimeout(RdfCloudTripleStoreConstants.MAX_TIME, TimeUnit.MILLISECONDS);
        batchWriterConfig.setMaxWriteThreads(RdfCloudTripleStoreConstants.NUM_THREADS);
        writer = conn.createMultiTableBatchWriter(batchWriterConfig);
        entityIndexer.setMultiTableBatchWriter(writer);
    }

    // update fields used for metrics
    startTime = System.currentTimeMillis();
    lastCommitFinishTime = startTime;

    // set up the triple context
    tripleContext = RyaTripleContext.getInstance(new AccumuloRdfConfiguration(conf));
}