org.apache.kudu.client.SessionConfiguration Java Examples
The following examples show how to use org.apache.kudu.client.SessionConfiguration.
The originating project, source file, and license are noted above each example.
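SessionConfiguration is the interface implemented by KuduSession (and AsyncKuduSession) that exposes per-session tuning such as the flush mode and mutation buffer size; the examples below use all three flush modes (AUTO_FLUSH_SYNC, the default, plus AUTO_FLUSH_BACKGROUND and MANUAL_FLUSH). As a quick orientation before the examples, here is a minimal sketch of the pattern most of them follow. It is a sketch only: the master address kudu-master:7051, the metrics table, and its host/value columns are hypothetical, and the table is assumed to already exist.

import java.util.List;

import org.apache.kudu.client.Insert;
import org.apache.kudu.client.KuduClient;
import org.apache.kudu.client.KuduException;
import org.apache.kudu.client.KuduSession;
import org.apache.kudu.client.KuduTable;
import org.apache.kudu.client.OperationResponse;
import org.apache.kudu.client.RowError;
import org.apache.kudu.client.SessionConfiguration;

public class SessionConfigurationSketch {
    public static void main(String[] args) throws KuduException {
        // Hypothetical master address and table/column names.
        KuduClient client = new KuduClient.KuduClientBuilder("kudu-master:7051").build();
        try {
            KuduTable table = client.openTable("metrics");
            KuduSession session = client.newSession();
            try {
                // SessionConfiguration methods control flush mode, buffer size, timeouts, etc.
                session.setFlushMode(SessionConfiguration.FlushMode.MANUAL_FLUSH);
                session.setMutationBufferSpace(1000);

                Insert insert = table.newInsert();
                insert.getRow().addString("host", "host-1");
                insert.getRow().addLong("value", 42L);
                session.apply(insert); // buffered until flush() in MANUAL_FLUSH mode

                // Flush the buffered operations and surface per-row errors.
                List<OperationResponse> responses = session.flush();
                for (OperationResponse response : responses) {
                    if (response.hasRowError()) {
                        System.err.println("Row error: " + response.getRowError());
                    }
                }
                // With AUTO_FLUSH_BACKGROUND, errors are instead collected asynchronously:
                for (RowError error : session.getPendingErrors().getRowErrors()) {
                    System.err.println("Pending error: " + error);
                }
            } finally {
                session.close();
            }
        } finally {
            client.close();
        }
    }
}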
Example #1
Source File: SchemaEmulationByTableNameConvention.java From presto with Apache License 2.0 | 6 votes |
private void createAndFillSchemasTable(KuduClient client)
        throws KuduException
{
    List<String> existingSchemaNames = listSchemaNamesFromTablets(client);
    ColumnSchema schemaColumnSchema = new ColumnSchema.ColumnSchemaBuilder("schema", Type.STRING)
            .key(true).build();
    Schema schema = new Schema(ImmutableList.of(schemaColumnSchema));
    CreateTableOptions options = new CreateTableOptions();
    options.addHashPartitions(ImmutableList.of(schemaColumnSchema.getName()), 2);
    KuduTable schemasTable = client.createTable(rawSchemasTableName, schema, options);
    KuduSession session = client.newSession();
    try {
        session.setFlushMode(SessionConfiguration.FlushMode.AUTO_FLUSH_BACKGROUND);
        for (String schemaName : existingSchemaNames) {
            Insert insert = schemasTable.newInsert();
            insert.getRow().addString(0, schemaName);
            session.apply(insert);
        }
    }
    finally {
        session.close();
    }
}
Example #2
Source File: KuduPageSink.java From presto with Apache License 2.0 | 6 votes |
private KuduPageSink(
        ConnectorSession connectorSession,
        KuduClientSession clientSession,
        KuduTable table,
        KuduTableMapping mapping)
{
    requireNonNull(clientSession, "clientSession is null");
    this.connectorSession = connectorSession;
    this.columnTypes = mapping.getColumnTypes();
    this.originalColumnTypes = mapping.getOriginalColumnTypes();
    this.generateUUID = mapping.isGenerateUUID();
    this.table = table;
    this.session = clientSession.newSession();
    this.session.setFlushMode(SessionConfiguration.FlushMode.AUTO_FLUSH_BACKGROUND);
    uuid = UUID.randomUUID().toString();
}
Example #3
Source File: DafAbstractKudu.java From daf-kylo with GNU Affero General Public License v3.0 | 6 votes |
@OnScheduled
public void OnScheduled(final ProcessContext context) {
    try {
        tableName = context.getProperty(TABLE_NAME).evaluateAttributeExpressions().getValue();
        kuduMasters = context.getProperty(KUDU_MASTERS).evaluateAttributeExpressions().getValue();
        if (kuduClient == null) {
            getLogger().debug("Setting up Kudu connection...");
            kuduClient = getKuduConnection(kuduMasters);
            kuduTable = this.getKuduTable(kuduClient, tableName);
            getLogger().debug("Kudu connection successfully initialized");
        }
        operationType = DafOperationType.valueOf(context.getProperty(INSERT_OPERATION).getValue());
        batchSize = context.getProperty(BATCH_SIZE).evaluateAttributeExpressions().asInteger();
        flushMode = SessionConfiguration.FlushMode.valueOf(context.getProperty(FLUSH_MODE).getValue());
        skipHeadLine = context.getProperty(SKIP_HEAD_LINE).asBoolean();
    } catch (KuduException ex) {
        //getLogger().error("Exception occurred while interacting with Kudu due to " + ex.getMessage(), ex);
    }
}
Example #4
Source File: NativeKuduClientSession.java From presto-kudu with Apache License 2.0 | 6 votes |
private void createAndFillSchemasTable() throws KuduException {
    List<String> existingSchemaNames = listSchemaNamesFromTablets();
    ColumnSchema tenantColumnSchema = new ColumnSchema.ColumnSchemaBuilder("tenant", Type.STRING)
            .key(true).build();
    ColumnSchema schemaColumnSchema = new ColumnSchema.ColumnSchemaBuilder("schema", Type.STRING)
            .key(true).build();
    Schema schema = new Schema(ImmutableList.of(tenantColumnSchema, schemaColumnSchema));
    CreateTableOptions options = new CreateTableOptions();
    options.setNumReplicas(1); // TODO config
    options.addHashPartitions(ImmutableList.of(tenantColumnSchema.getName()), 2);
    KuduTable schemasTable = client.createTable(rawSchemasTableName, schema, options);
    KuduSession session = client.newSession();
    session.setFlushMode(SessionConfiguration.FlushMode.AUTO_FLUSH_BACKGROUND);
    try {
        for (String schemaName : existingSchemaNames) {
            Insert insert = schemasTable.newInsert();
            fillSchemaRow(insert.getRow(), schemaName);
            session.apply(insert);
        }
    } finally {
        session.close();
    }
}
Example #5
Source File: KuduTarget.java From datacollector with Apache License 2.0 | 6 votes |
private KuduSession openKuduSession(List<ConfigIssue> issues) {
    KuduSession session = kuduClient.newSession();
    try {
        session.setExternalConsistencyMode(ExternalConsistencyMode.valueOf(configBean.consistencyMode.name()));
    } catch (IllegalArgumentException ex) {
        issues.add(
            getContext().createConfigIssue(
                Groups.KUDU.name(),
                KuduConfigBean.CONF_PREFIX + CONSISTENCY_MODE,
                Errors.KUDU_02
            )
        );
    }
    session.setMutationBufferSpace(configBean.mutationBufferSpace);
    session.setFlushMode(SessionConfiguration.FlushMode.MANUAL_FLUSH);
    return session;
}
Example #6
Source File: IncrementalStepScanInputOperator.java From attic-apex-malhar with Apache License 2.0 | 6 votes |
/**
 * A simple init of the kudu connection config. Override this if you would like to fine tune the connection parameters.
 * @param kuduTableName The Kudu table name
 * @param kuduMasters The master hosts of the Kudu cluster.
 */
public void initKuduConfig(String kuduTableName, List<String> kuduMasters) {
    apexKuduConnectionBuilder = new ApexKuduConnection.ApexKuduConnectionBuilder()
        .withTableName(kuduTableName)
        .withExternalConsistencyMode(ExternalConsistencyMode.COMMIT_WAIT)
        .withFlushMode(SessionConfiguration.FlushMode.AUTO_FLUSH_SYNC)
        .withNumberOfBossThreads(1)
        .withNumberOfWorkerThreads(2)
        .withSocketReadTimeOutAs(3000)
        .withOperationTimeOutAs(3000);
    for (String aMasterAndHost : kuduMasters) {
        apexKuduConnectionBuilder = apexKuduConnectionBuilder.withAPossibleMasterHostAs(aMasterAndHost);
    }
    apexKuduConnectionInfo = apexKuduConnectionBuilder;
}
Example #7
Source File: TestKuduTS.java From kudu-ts with Apache License 2.0 | 6 votes |
/** Tests writing and querying a single series with no downsampling. */
@Test
public void testSingleSeries() throws Exception {
    try (KuduTS ts = KuduTS.openOrCreate(ImmutableList.of(getMasterAddresses()),
                                         "testSingleSeries",
                                         CreateOptions.defaults())) {
        WriteBatch batch = ts.writeBatch();
        batch.setFlushMode(SessionConfiguration.FlushMode.AUTO_FLUSH_BACKGROUND);
        try {
            String metric = "m";
            SortedMap<String, String> tagset = tagset("k1", "v1");
            Datapoints series = generateSeries(ts, metric, 420000, 10000, 100, tagset);
            for (Datapoint datapoint : series) {
                batch.writeDatapoint(metric, tagset, datapoint.getTime(), datapoint.getValue());
            }
            batch.flush();
            Query query = Query.create(metric, tagset, Aggregators.max());
            assertEquals(series, ts.query(query).get(0));
        } finally {
            batch.close();
        }
    }
}
Example #8
Source File: AbstractKuduProcessor.java From nifi with Apache License 2.0 | 5 votes |
protected void flushKuduSession(final KuduSession kuduSession, boolean close, final List<RowError> rowErrors)
        throws KuduException {
    final List<OperationResponse> responses = close ? kuduSession.close() : kuduSession.flush();

    if (kuduSession.getFlushMode() == SessionConfiguration.FlushMode.AUTO_FLUSH_BACKGROUND) {
        rowErrors.addAll(Arrays.asList(kuduSession.getPendingErrors().getRowErrors()));
    } else {
        responses.stream()
            .filter(OperationResponse::hasRowError)
            .map(OperationResponse::getRowError)
            .forEach(rowErrors::add);
    }
}
Example #9
Source File: PutKudu.java From nifi with Apache License 2.0 | 5 votes |
@OnScheduled
public void onScheduled(final ProcessContext context) throws IOException, LoginException {
    batchSize = context.getProperty(BATCH_SIZE).evaluateAttributeExpressions().asInteger();
    ffbatch = context.getProperty(FLOWFILE_BATCH_SIZE).evaluateAttributeExpressions().asInteger();
    flushMode = SessionConfiguration.FlushMode.valueOf(context.getProperty(FLUSH_MODE).getValue().toUpperCase());
    createKerberosUserAndOrKuduClient(context);
}
Example #10
Source File: KuduWriter.java From geowave with Apache License 2.0 | 5 votes |
private boolean setAutoFlushMode() {
    session.setFlushMode(SessionConfiguration.FlushMode.AUTO_FLUSH_BACKGROUND);
    if (session.getFlushMode() != SessionConfiguration.FlushMode.AUTO_FLUSH_BACKGROUND) {
        LOGGER.error("Fail to set session Flush Mode to AUTO_FLUSH_BACKGROUND.");
        return false;
    }
    return true;
}
Example #11
Source File: SimpleKuduOutputOperator.java From attic-apex-malhar with Apache License 2.0 | 5 votes |
@Override
ApexKuduConnection.ApexKuduConnectionBuilder getKuduConnectionConfig() {
    return new ApexKuduConnection.ApexKuduConnectionBuilder()
        .withAPossibleMasterHostAs(KuduClientTestCommons.kuduMasterAddresses)
        .withTableName(KuduClientTestCommons.tableName)
        .withExternalConsistencyMode(ExternalConsistencyMode.COMMIT_WAIT)
        .withFlushMode(SessionConfiguration.FlushMode.AUTO_FLUSH_SYNC)
        .withNumberOfBossThreads(1)
        .withNumberOfWorkerThreads(2)
        .withSocketReadTimeOutAs(3000)
        .withOperationTimeOutAs(3000);
}
Example #12
Source File: BaseKuduOutputOperator.java From attic-apex-malhar with Apache License 2.0 | 5 votes |
private void initKuduConfig(String kuduTableName, List<String> kuduMasters) {
    apexKuduConnectionBuilder = new ApexKuduConnection.ApexKuduConnectionBuilder()
        .withTableName(kuduTableName)
        .withExternalConsistencyMode(ExternalConsistencyMode.COMMIT_WAIT)
        .withFlushMode(SessionConfiguration.FlushMode.AUTO_FLUSH_SYNC)
        .withNumberOfBossThreads(1)
        .withNumberOfWorkerThreads(2)
        .withSocketReadTimeOutAs(3000)
        .withOperationTimeOutAs(3000);
    for (String aMasterAndHost : kuduMasters) {
        apexKuduConnectionBuilder = apexKuduConnectionBuilder.withAPossibleMasterHostAs(aMasterAndHost);
    }
}
Example #13
Source File: KuduServiceImpl.java From beam with Apache License 2.0 | 5 votes |
@Override
public void openSession() throws KuduException {
    // errors are collected per session so we align session with the bundle
    session = client.newSession();
    // async flushing as per the official kudu-spark approach
    session.setFlushMode(SessionConfiguration.FlushMode.AUTO_FLUSH_BACKGROUND);
}
Example #14
Source File: KuduConnection.java From envelope with Apache License 2.0 | 5 votes |
KuduConnection(Config config, byte[] token) {
    client = new KuduClient.KuduClientBuilder(config.getString(CONNECTION_CONFIG_NAME)).build();

    if (token != null) {
        client.importAuthenticationCredentials(token);
    }

    session = client.newSession();
    session.setFlushMode(SessionConfiguration.FlushMode.AUTO_FLUSH_BACKGROUND);
    session.setMutationBufferSpace(10000);
    session.setIgnoreAllDuplicateRows(KuduUtils.doesInsertIgnoreDuplicates(config));
}
Example #15
Source File: KuduPageSink.java From presto-kudu with Apache License 2.0 | 5 votes |
public KuduPageSink(ConnectorSession connectorSession,
                    KuduClientSession clientSession,
                    KuduExtendedTableHandle extendedTableHandle,
                    boolean generateUUID) {
    requireNonNull(clientSession, "clientSession is null");
    this.connectorSession = connectorSession;
    this.columnTypes = extendedTableHandle.getColumnTypes();
    this.originalColumnTypes = extendedTableHandle.getOriginalColumnTypes();
    this.generateUUID = generateUUID;

    this.table = extendedTableHandle.getTable(clientSession);
    this.session = clientSession.newSession();
    this.session.setFlushMode(SessionConfiguration.FlushMode.AUTO_FLUSH_BACKGROUND);
    uuid = UUID.randomUUID().toString();
}
Example #16
Source File: KuduSink.java From sylph with Apache License 2.0 | 5 votes |
@Override
public boolean open(long partitionId, long version) throws Exception {
    this.kuduClient = new KuduClient.KuduClientBuilder(kuduHost).build();
    this.kuduSession = kuduClient.newSession();
    this.kuduTable = kuduClient.openTable(tableName);
    this.operationCreater = getOperationCreater(kuduSinkConfig.mode, kuduTable);

    kuduSession.setFlushMode(SessionConfiguration.FlushMode.MANUAL_FLUSH);
    //kuduSession.setFlushInterval();
    this.kuduSession.setMutationBufferSpace(this.mutationBufferSpace); //8m
    return true;
}
Example #17
Source File: TestKuduTS.java From kudu-ts with Apache License 2.0 | 4 votes |
/** Tests writing and querying a single series with downsampling. */
@Test
public void testSingleSeriesDownsample() throws Exception {
    try (KuduTS ts = KuduTS.openOrCreate(ImmutableList.of(getMasterAddresses()),
                                         "testSingleSeriesDownsample",
                                         CreateOptions.defaults())) {
        WriteBatch batch = ts.writeBatch();
        batch.setFlushMode(SessionConfiguration.FlushMode.AUTO_FLUSH_BACKGROUND);
        try {
            String metric = "m";
            SortedMap<String, String> tagset = tagset("k1", "v1");
            Datapoints series = generateSeries(ts, metric, 420000, 10000, 100, tagset);
            for (Datapoint datapoint : series) {
                batch.writeDatapoint(metric, tagset, datapoint.getTime(), datapoint.getValue());
            }
            batch.flush();

            Query query = Query.create(metric, tagset, Aggregators.sum())
                               .setDownsampler(Aggregators.sum(), 20000);
            Datapoints results = ts.query(query).get(0);
            assertEquals(series.getMetric(), results.getMetric());

            IntVec seriesTagsetIDs = series.getTagsetIDs().clone();
            seriesTagsetIDs.sort();
            IntVec resultsTagsetIDs = results.getTagsetIDs().clone();
            resultsTagsetIDs.sort();
            assertEquals(seriesTagsetIDs, results.getTagsetIDs());

            assertEquals(50, results.size());
            for (int i = 0; i < 50; i++) {
                assertEquals(series.getTime(i * 2), results.getTime(i));
                assertEquals(series.getValue(2 * i) + series.getValue(2 * i + 1), results.getValue(i), 1e-9);
            }
        } finally {
            batch.close();
        }
    }
}
Example #18
Source File: Tags.java From kudu-ts with Apache License 2.0 | 4 votes |
/**
 * Insert a tagset into the {@code tags} table.
 * @param id the tagset ID.
 * @param tagset the tagset.
 * @return The tagset ID.
 */
public Deferred<Integer> insertTagset(final int id, final SortedMap<String, String> tagset)
        throws KuduException {
    if (tagset.isEmpty()) {
        return Deferred.fromResult(id);
    }
    LOG.debug("Inserting tags; tagsetID: {}, tags: {}", id, tagset);
    final AsyncKuduSession session = client.newSession();

    class InsertTagsetCB implements Callback<Deferred<Integer>, List<OperationResponse>> {
        @Override
        public Deferred<Integer> call(List<OperationResponse> responses) {
            try {
                for (OperationResponse response : responses) {
                    if (response.hasRowError()) {
                        return Deferred.fromError(new RuntimeException(
                            String.format("Unable to insert tag: %s", response.getRowError())));
                    }
                }
                return Deferred.fromResult(id);
            } finally {
                session.close();
            }
        }
        @Override
        public String toString() {
            return MoreObjects.toStringHelper(this)
                .add("id", id)
                .add("tags", tagset)
                .toString();
        }
    }

    if (tagset.size() > 1000) {
        session.setMutationBufferSpace(tagset.size());
    }
    session.setMutationBufferLowWatermark(1.0f);

    // buffer all of the tags into the session, and ensure that we don't get
    // a PleaseThrottleException. In practice the number of tags should be
    // small.
    session.setMutationBufferSpace(tagset.size());
    session.setMutationBufferLowWatermark(1.0f);
    session.setFlushMode(SessionConfiguration.FlushMode.MANUAL_FLUSH);

    for (Map.Entry<String, String> tag : tagset.entrySet()) {
        Insert insert = table.newInsert();
        // TODO: check with JD that if the inserts below fail, the error will
        // also be returned in the flush call.
        insert.getRow().addString(Tables.TAGS_KEY_INDEX, tag.getKey());
        insert.getRow().addString(Tables.TAGS_VALUE_INDEX, tag.getValue());
        insert.getRow().addInt(Tables.TAGS_TAGSET_ID_INDEX, id);
        session.apply(insert);
    }
    return session.flush().addCallbackDeferring(new InsertTagsetCB());
}
Example #19
Source File: PutResource.java From kudu-ts with Apache License 2.0 | 4 votes |
@POST
@Timed
public Response put(@QueryParam("summary") @DefaultValue("false") BooleanFlag summary,
                    @QueryParam("details") @DefaultValue("false") BooleanFlag details,
                    @QueryParam("sync") @DefaultValue("false") BooleanFlag sync,
                    @QueryParam("sync_timeout") @DefaultValue("120000") IntParam sync_timeout,
                    JsonNode body) throws Exception {
    LOG.trace("put; summary: {}, details: {}, sync: {}, sync_timeout: {}, body: {}",
              summary, details, sync, sync_timeout, body);

    WriteBatch batch = ts.writeBatch();
    try {
        batch.setFlushMode(SessionConfiguration.FlushMode.AUTO_FLUSH_BACKGROUND);
        if (sync_timeout.get() > 0) batch.setTimeoutMillis(sync_timeout.get());

        int datapoints = 0;
        List<Error> errors = new ArrayList<>();

        Iterator<JsonNode> nodes;
        if (body.isArray()) {
            nodes = body.elements();
        } else {
            nodes = Iterators.singletonIterator(body);
        }

        while (nodes.hasNext()) {
            datapoints++;
            JsonNode node = nodes.next();
            try {
                Datapoint datapoint = mapper.treeToValue(node, Datapoint.class);
                batch.writeDatapoint(datapoint.getMetric(), datapoint.getTags(),
                                     datapoint.getTimestamp(), datapoint.getValue());
            } catch (JsonProcessingException e) {
                errors.add(new Error(node, e.getMessage()));
            }
        }

        batch.flush();

        RowErrorsAndOverflowStatus batchErrors = batch.getPendingErrors();
        for (RowError rowError : batchErrors.getRowErrors()) {
            errors.add(new Error(null, rowError.getErrorStatus().toString()
                + " (op " + rowError.getOperation().toString() + ")"));
        }

        if (errors.isEmpty()) {
            LOG.debug("put {} datapoints: {}", datapoints, body);
            return Response.noContent().build();
        } else {
            LOG.error("failed to write {} of {} body: {}", errors.size(), datapoints, errors);
            if (details.get()) {
                Detail detail = new Detail(errors, errors.size(), datapoints - errors.size());
                return Response.status(Response.Status.BAD_REQUEST).entity(detail).build();
            } else if (summary.get()) {
                Summary s = new Summary(errors.size(), datapoints - errors.size());
                return Response.status(Response.Status.BAD_REQUEST).entity(s).build();
            } else {
                return Response.status(Response.Status.BAD_REQUEST).build();
            }
        }
    } finally {
        batch.close();
    }
}
Example #20
Source File: ApexKuduConnection.java From attic-apex-malhar with Apache License 2.0 | 4 votes |
public ApexKuduConnectionBuilder withFlushMode(SessionConfiguration.FlushMode flushMode) {
    this.flushMode = flushMode;
    isFlushModeSet = true;
    return this;
}
Example #21
Source File: PutResource.java From kudu-ts with Apache License 2.0 | 4 votes |
public PutResource(KuduTS ts, ObjectMapper mapper) {
    this.ts = ts;
    this.mapper = mapper;
    this.batch = ts.writeBatch();
    batch.setFlushMode(SessionConfiguration.FlushMode.AUTO_FLUSH_BACKGROUND);
}