org.apache.flume.instrumentation.SinkCounter Java Examples
The following examples show how to use
org.apache.flume.instrumentation.SinkCounter.
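Before the examples, here is a minimal sketch of where a SinkCounter typically fits in a custom sink's lifecycle: it is created in configure(), started and stopped together with the sink, and its batch and drain counters are updated from process(). The class name, the no-op delivery step, and the channel handling below are illustrative assumptions, not code taken from any of the projects listed here.

import org.apache.flume.Channel;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.Transaction;
import org.apache.flume.conf.Configurable;
import org.apache.flume.instrumentation.SinkCounter;
import org.apache.flume.sink.AbstractSink;

// Hypothetical sink illustrating the usual SinkCounter lifecycle.
public class CountingNoopSink extends AbstractSink implements Configurable {

  private SinkCounter sinkCounter;

  @Override
  public void configure(Context context) {
    // Counters are usually named after the sink so metrics stay distinguishable.
    if (sinkCounter == null) {
      sinkCounter = new SinkCounter(getName());
    }
  }

  @Override
  public synchronized void start() {
    sinkCounter.start();                        // register and start the counter group
    sinkCounter.incrementConnectionCreatedCount();
    super.start();
  }

  @Override
  public Status process() throws EventDeliveryException {
    Channel channel = getChannel();
    Transaction txn = channel.getTransaction();
    txn.begin();
    try {
      Event event = channel.take();
      if (event == null) {
        sinkCounter.incrementBatchEmptyCount(); // nothing to drain this round
        txn.commit();
        return Status.BACKOFF;
      }
      sinkCounter.incrementEventDrainAttemptCount();
      // ... deliver the event to the downstream system here (omitted) ...
      txn.commit();
      sinkCounter.incrementEventDrainSuccessCount();
      return Status.READY;
    } catch (Throwable t) {
      txn.rollback();
      throw new EventDeliveryException("Failed to deliver event", t);
    } finally {
      txn.close();
    }
  }

  @Override
  public synchronized void stop() {
    sinkCounter.incrementConnectionClosedCount();
    sinkCounter.stop();
    super.stop();
  }
}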
Example #1
Source File: TestBucketWriter.java From mt-flume with Apache License 2.0
@Test
public void testEventCountingRoller() throws IOException, InterruptedException {
  int maxEvents = 100;
  MockHDFSWriter hdfsWriter = new MockHDFSWriter();
  BucketWriter bucketWriter = new BucketWriter(0, 0, maxEvents, 0, ctx,
      "/tmp", "file", "", ".tmp", null, null, SequenceFile.CompressionType.NONE,
      hdfsWriter, timedRollerPool, null,
      new SinkCounter("test-bucket-writer-" + System.currentTimeMillis()),
      0, null, null, 30000, Executors.newSingleThreadExecutor());

  Event e = EventBuilder.withBody("foo", Charsets.UTF_8);
  for (int i = 0; i < 1000; i++) {
    bucketWriter.append(e);
  }

  logger.info("Number of events written: {}", hdfsWriter.getEventsWritten());
  logger.info("Number of bytes written: {}", hdfsWriter.getBytesWritten());
  logger.info("Number of files opened: {}", hdfsWriter.getFilesOpened());

  Assert.assertEquals("events written", 1000, hdfsWriter.getEventsWritten());
  Assert.assertEquals("bytes written", 3000, hdfsWriter.getBytesWritten());
  Assert.assertEquals("files opened", 10, hdfsWriter.getFilesOpened());
}
Example #2
Source File: TestBucketWriter.java From mt-flume with Apache License 2.0
@Test
public void testSizeRoller() throws IOException, InterruptedException {
  int maxBytes = 300;
  MockHDFSWriter hdfsWriter = new MockHDFSWriter();
  BucketWriter bucketWriter = new BucketWriter(0, maxBytes, 0, 0, ctx,
      "/tmp", "file", "", ".tmp", null, null, SequenceFile.CompressionType.NONE,
      hdfsWriter, timedRollerPool, null,
      new SinkCounter("test-bucket-writer-" + System.currentTimeMillis()),
      0, null, null, 30000, Executors.newSingleThreadExecutor());

  Event e = EventBuilder.withBody("foo", Charsets.UTF_8);
  for (int i = 0; i < 1000; i++) {
    bucketWriter.append(e);
  }

  logger.info("Number of events written: {}", hdfsWriter.getEventsWritten());
  logger.info("Number of bytes written: {}", hdfsWriter.getBytesWritten());
  logger.info("Number of files opened: {}", hdfsWriter.getFilesOpened());

  Assert.assertEquals("events written", 1000, hdfsWriter.getEventsWritten());
  Assert.assertEquals("bytes written", 3000, hdfsWriter.getBytesWritten());
  Assert.assertEquals("files opened", 10, hdfsWriter.getFilesOpened());
}
Example #3
Source File: TestBucketWriter.java From mt-flume with Apache License 2.0
@Test
public void testInUseSuffix() throws IOException, InterruptedException {
  final int ROLL_INTERVAL = 1000; // seconds. Make sure it doesn't change in course of test
  final String SUFFIX = "WELCOME_TO_THE_HELLMOUNTH";

  MockHDFSWriter hdfsWriter = new MockHDFSWriter();
  HDFSTextSerializer serializer = new HDFSTextSerializer();
  BucketWriter bucketWriter = new BucketWriter(ROLL_INTERVAL, 0, 0, 0, ctx,
      "/tmp", "file", "", SUFFIX, null, null, SequenceFile.CompressionType.NONE,
      hdfsWriter, timedRollerPool, null,
      new SinkCounter("test-bucket-writer-" + System.currentTimeMillis()),
      0, null, null, 30000, Executors.newSingleThreadExecutor());

  Event e = EventBuilder.withBody("foo", Charsets.UTF_8);
  bucketWriter.append(e);

  Assert.assertTrue("Incorrect in use suffix",
      hdfsWriter.getOpenedFilePath().contains(SUFFIX));
}
Example #4
Source File: TestBucketWriter.java From mt-flume with Apache License 2.0
@Test
public void testFileSuffixNotGiven() throws IOException, InterruptedException {
  final int ROLL_INTERVAL = 1000; // seconds. Make sure it doesn't change in course of test
  final String suffix = null;

  MockHDFSWriter hdfsWriter = new MockHDFSWriter();
  BucketWriter bucketWriter = new BucketWriter(ROLL_INTERVAL, 0, 0, 0, ctx,
      "/tmp", "file", "", ".tmp", suffix, null, SequenceFile.CompressionType.NONE,
      hdfsWriter, timedRollerPool, null,
      new SinkCounter("test-bucket-writer-" + System.currentTimeMillis()),
      0, null, null, 30000, Executors.newSingleThreadExecutor());

  // Need to override system time use for test so we know what to expect
  final long testTime = System.currentTimeMillis();
  Clock testClock = new Clock() {
    public long currentTimeMillis() {
      return testTime;
    }
  };
  bucketWriter.setClock(testClock);

  Event e = EventBuilder.withBody("foo", Charsets.UTF_8);
  bucketWriter.append(e);

  Assert.assertTrue("Incorrect suffix",
      hdfsWriter.getOpenedFilePath().endsWith(Long.toString(testTime + 1) + ".tmp"));
}
Example #5
Source File: TestBucketWriter.java From mt-flume with Apache License 2.0
@Test
public void testInUsePrefix() throws IOException, InterruptedException {
  final int ROLL_INTERVAL = 1000; // seconds. Make sure it doesn't change in course of test
  final String PREFIX = "BRNO_IS_CITY_IN_CZECH_REPUBLIC";

  MockHDFSWriter hdfsWriter = new MockHDFSWriter();
  HDFSTextSerializer formatter = new HDFSTextSerializer();
  BucketWriter bucketWriter = new BucketWriter(ROLL_INTERVAL, 0, 0, 0, ctx,
      "/tmp", "file", PREFIX, ".tmp", null, null, SequenceFile.CompressionType.NONE,
      hdfsWriter, timedRollerPool, null,
      new SinkCounter("test-bucket-writer-" + System.currentTimeMillis()),
      0, null, null, 30000, Executors.newSingleThreadExecutor());

  Event e = EventBuilder.withBody("foo", Charsets.UTF_8);
  bucketWriter.append(e);

  Assert.assertTrue("Incorrect in use prefix",
      hdfsWriter.getOpenedFilePath().contains(PREFIX));
}
Example #6
Source File: BucketWriter.java From Transwarp-Sample-Code with MIT License
BucketWriter(long rollInterval, long rollSize, long rollCount, long batchSize,
    Context context, String filePath, String fileName, String inUsePrefix,
    String inUseSuffix, String fileSuffix, CompressionCodec codeC,
    CompressionType compType, HDFSWriter writer,
    ScheduledExecutorService timedRollerPool, PrivilegedExecutor proxyUser,
    SinkCounter sinkCounter, int idleTimeout, HDFSSink.WriterCallback onCloseCallback,
    String onCloseCallbackPath, long callTimeout,
    ExecutorService callTimeoutPool, long retryInterval, int maxCloseTries) {
  this.rollInterval = rollInterval;
  this.rollSize = rollSize;
  this.rollCount = rollCount;
  this.batchSize = batchSize;
  this.filePath = filePath;
  this.fileName = fileName;
  this.inUsePrefix = inUsePrefix;
  this.inUseSuffix = inUseSuffix;
  this.fileSuffix = fileSuffix;
  this.codeC = codeC;
  this.compType = compType;
  this.writer = writer;
  this.timedRollerPool = timedRollerPool;
  this.proxyUser = proxyUser;
  this.sinkCounter = sinkCounter;
  this.idleTimeout = idleTimeout;
  this.onCloseCallback = onCloseCallback;
  this.onCloseCallbackPath = onCloseCallbackPath;
  this.callTimeout = callTimeout;
  this.callTimeoutPool = callTimeoutPool;
  fileExtensionCounter = new AtomicLong(clock.currentTimeMillis());
  this.retryInterval = retryInterval;
  this.maxRenameTries = maxCloseTries;
  isOpen = false;
  isUnderReplicated = false;
  this.writer.configure(context);
}
Example #7
Source File: PhoenixSink.java From phoenix with BSD 3-Clause "New" or "Revised" License
@Override
public void configure(Context context) {
  this.setName(NAME + counter.incrementAndGet());
  this.batchSize = context.getInteger(FlumeConstants.CONFIG_BATCHSIZE,
      FlumeConstants.DEFAULT_BATCH_SIZE);
  final String eventSerializerType = context.getString(FlumeConstants.CONFIG_SERIALIZER);

  Preconditions.checkNotNull(eventSerializerType,
      "Event serializer cannot be empty, please specify in the configuration file");
  initializeSerializer(context, eventSerializerType);
  this.sinkCounter = new SinkCounter(this.getName());
}
Example #8
Source File: DruidSink.java From ingestion with Apache License 2.0
@Override
public void configure(Context context) {
  indexService = context.getString(INDEX_SERVICE);
  discoveryPath = context.getString(DISCOVERY_PATH);
  dimensions = Arrays.asList(context.getString(DIMENSIONS).split(","));
  firehosePattern = context.getString(FIREHOSE_PATTERN, DEFAULT_FIREHOSE);
  dataSource = context.getString(DATA_SOURCE, DEFAUL_DATASOURCE);
  aggregators = AggregatorsHelper.build(context.getString(AGGREGATORS));
  queryGranularity = QueryGranularityHelper.getGranularity(
      context.getString(QUERY_GRANULARITY, DEFAULT_QUERY_GRANULARITY));
  segmentGranularity = Granularity.valueOf(
      context.getString(SEGMENT_GRANULARITY, DEFAULT_SEGMENT_GRANULARITY));
  period = context.getString(WINDOW_PERIOD, DEFAULT_PERIOD);
  partitions = context.getInteger(PARTITIONS, DEFAULT_PARTITIONS);
  replicants = context.getInteger(REPLICANTS, DEFAULT_REPLICANTS);
  // Tranquility needs to be able to extract timestamps from your object type (in this case, Map<String, Object>).
  timestampField = context.getString(TIMESTAMP_FIELD, DEFAULT_TIMESTAMP_FIELD);
  zookeeperLocation = context.getString(ZOOKEEPER_LOCATION, DEFAULT_ZOOKEEPER_LOCATION);
  baseSleppTime = context.getInteger(ZOOKEEPPER_BASE_SLEEP_TIME, DEFAULT_ZOOKEEPER_BASE_SLEEP);
  maxRetries = context.getInteger(ZOOKEEPER_MAX_RETRIES, DEFAULT_ZOOKEEPER_MAX_RETRIES);
  maxSleep = context.getInteger(ZOOKEEPER_MAX_SLEEP, DEFAULT_ZOOKEEPER_MAX_SLEEP);
  batchSize = context.getInteger(BATCH_SIZE, DEFAULT_BATCH_SIZE);

  druidService = buildDruidService();
  sinkCounter = new SinkCounter(this.getName());
  eventParser = new EventParser(timestampField);
}
Example #9
Source File: StratioDecisionSink.java From ingestion with Apache License 2.0
public void configure(Context context) {
  this.batchsize = context.getInteger(CONF_BATCH_SIZE, DEFAULT_BATCH_SIZE);
  this.sinkCounter = new SinkCounter(this.getName());
  this.zookeeper = context.getString(ZOOKEEPER, DEFAULT_ZOOKEEPER);
  this.kafka = context.getString(KAFKA, DEFAULT_KAFKA);
  this.zkPath = context.getString(ZOOKEEPER_PATH, DEFAULT_ZKPATH);
  //if (context.getString(TOPIC) != null)
  //  this.topic= context.getString(TOPIC);
  this.topic = context.getString(TOPIC, "");
  log.info("Configuring Stratio Decision Sink: {zookeeper= " + this.zookeeper
      + ", kafka= " + this.kafka + ", topic= " + this.topic
      + ", batchSize= " + this.batchsize + ", sinkCounter= " + this.sinkCounter + "}");
  //else
  //  this.topic= "";
  String columnDefinitionFile = context.getString(STREAM_DEFINITION_FILE);
  com.stratio.ingestion.sink.decision.StreamDefinitionParser parser =
      new StreamDefinitionParser(readJsonFromFile(new File(columnDefinitionFile)));
  StreamDefinition theStreamDefinition = parser.parse();
  this.streamName = theStreamDefinition.getStreamName();
  this.streamFields = theStreamDefinition.getFields();
  try {
    this.stratioStreamingAPI = StratioStreamingAPIFactory.create()
        .withQuorumConfig(kafka, zookeeper, zkPath)
        .init();
  } catch (StratioEngineConnectionException e) {
    throw new StratioDecisionSinkException(e);
  }
}
Example #10
Source File: MongoSink.java From ingestion with Apache License 2.0
/**
 * {@inheritDoc}
 *
 * @param context
 */
@Override
public void configure(Context context) {
  try {
    if (!"INJECTED".equals(context.getString(CONF_URI))) {
      this.mongoClientURI = new MongoClientURI(
          context.getString(CONF_URI),
          MongoClientOptions.builder().writeConcern(WriteConcern.SAFE)
      );
      this.mongoClient = new MongoClient(mongoClientURI);
      if (mongoClientURI.getDatabase() != null) {
        this.mongoDefaultDb = mongoClient.getDB(mongoClientURI.getDatabase());
      }
      if (mongoClientURI.getCollection() != null) {
        this.mongoDefaultCollection =
            mongoDefaultDb.getCollection(mongoClientURI.getCollection());
      }
    }

    final String mappingFilename = context.getString(CONF_MAPPING_FILE);
    this.eventParser = (mappingFilename == null)
        ? new EventParser()
        : new EventParser(MappingDefinition.load(mappingFilename));

    this.isDynamicMode = context.getBoolean(CONF_DYNAMIC, DEFAULT_DYNAMIC);
    if (!isDynamicMode && mongoDefaultCollection == null) {
      throw new MongoSinkException(
          "Default MongoDB collection must be specified unless dynamic mode is enabled");
    }
    this.dynamicDBField = context.getString(CONF_DYNAMIC_DB_FIELD, DEFAULT_DYNAMIC_DB_FIELD);
    this.dynamicCollectionField =
        context.getString(CONF_DYNAMIC_COLLECTION_FIELD, DEFAULT_DYNAMIC_COLLECTION_FIELD);

    this.sinkCounter = new SinkCounter(this.getName());
    this.batchSize = context.getInteger(CONF_BATCH_SIZE, DEFAULT_BATCH_SIZE);
    this.updateInsteadReplace =
        context.getBoolean(CONF_UPDATE_INSTEAD_REPLACE, DEFAULT_UPDATE_INSTEAD_REPLACE);
  } catch (IOException ex) {
    throw new MongoSinkException(ex);
  }
}
Example #11
Source File: TestBucketWriter.java From mt-flume with Apache License 2.0
@Test
public void testFileSuffixCompressed() throws IOException, InterruptedException {
  final int ROLL_INTERVAL = 1000; // seconds. Make sure it doesn't change in course of test
  final String suffix = ".foo";

  MockHDFSWriter hdfsWriter = new MockHDFSWriter();
  BucketWriter bucketWriter = new BucketWriter(ROLL_INTERVAL, 0, 0, 0, ctx,
      "/tmp", "file", "", ".tmp", suffix, HDFSEventSink.getCodec("gzip"),
      SequenceFile.CompressionType.BLOCK, hdfsWriter, timedRollerPool, null,
      new SinkCounter("test-bucket-writer-" + System.currentTimeMillis()),
      0, null, null, 30000, Executors.newSingleThreadExecutor());

  // Need to override system time use for test so we know what to expect
  final long testTime = System.currentTimeMillis();
  Clock testClock = new Clock() {
    public long currentTimeMillis() {
      return testTime;
    }
  };
  bucketWriter.setClock(testClock);

  Event e = EventBuilder.withBody("foo", Charsets.UTF_8);
  bucketWriter.append(e);

  Assert.assertTrue("Incorrect suffix", hdfsWriter.getOpenedFilePath()
      .endsWith(Long.toString(testTime + 1) + suffix + ".tmp"));
}
Example #12
Source File: TestBucketWriter.java From mt-flume with Apache License 2.0
@Test
public void testFileSuffixGiven() throws IOException, InterruptedException {
  final int ROLL_INTERVAL = 1000; // seconds. Make sure it doesn't change in course of test
  final String suffix = ".avro";

  MockHDFSWriter hdfsWriter = new MockHDFSWriter();
  BucketWriter bucketWriter = new BucketWriter(ROLL_INTERVAL, 0, 0, 0, ctx,
      "/tmp", "file", "", ".tmp", suffix, null, SequenceFile.CompressionType.NONE,
      hdfsWriter, timedRollerPool, null,
      new SinkCounter("test-bucket-writer-" + System.currentTimeMillis()),
      0, null, null, 30000, Executors.newSingleThreadExecutor());

  // Need to override system time use for test so we know what to expect
  final long testTime = System.currentTimeMillis();
  Clock testClock = new Clock() {
    public long currentTimeMillis() {
      return testTime;
    }
  };
  bucketWriter.setClock(testClock);

  Event e = EventBuilder.withBody("foo", Charsets.UTF_8);
  bucketWriter.append(e);

  Assert.assertTrue("Incorrect suffix",
      hdfsWriter.getOpenedFilePath().endsWith(Long.toString(testTime + 1) + suffix + ".tmp"));
}
Example #13
Source File: BucketWriter.java From mt-flume with Apache License 2.0
BucketWriter(long rollInterval, long rollSize, long rollCount, long batchSize,
    Context context, String filePath, String fileName, String inUsePrefix,
    String inUseSuffix, String fileSuffix, CompressionCodec codeC,
    CompressionType compType, HDFSWriter writer,
    ScheduledExecutorService timedRollerPool, UserGroupInformation user,
    SinkCounter sinkCounter, int idleTimeout, WriterCallback onIdleCallback,
    String onIdleCallbackPath, long callTimeout,
    ExecutorService callTimeoutPool) {
  this.rollInterval = rollInterval;
  this.rollSize = rollSize;
  this.rollCount = rollCount;
  this.batchSize = batchSize;
  this.filePath = filePath;
  this.fileName = fileName;
  this.inUsePrefix = inUsePrefix;
  this.inUseSuffix = inUseSuffix;
  this.fileSuffix = fileSuffix;
  this.codeC = codeC;
  this.compType = compType;
  this.writer = writer;
  this.timedRollerPool = timedRollerPool;
  this.user = user;
  this.sinkCounter = sinkCounter;
  this.idleTimeout = idleTimeout;
  this.onIdleCallback = onIdleCallback;
  this.onIdleCallbackPath = onIdleCallbackPath;
  this.callTimeout = callTimeout;
  this.callTimeoutPool = callTimeoutPool;
  fileExtensionCounter = new AtomicLong(clock.currentTimeMillis());
  isOpen = false;
  isUnderReplicated = false;
  this.writer.configure(context);
}
Example #14
Source File: PhoenixSink.java From phoenix with Apache License 2.0
@Override
public void configure(Context context) {
  this.setName(NAME + counter.incrementAndGet());
  this.batchSize = context.getInteger(FlumeConstants.CONFIG_BATCHSIZE,
      FlumeConstants.DEFAULT_BATCH_SIZE);
  final String eventSerializerType = context.getString(FlumeConstants.CONFIG_SERIALIZER);

  Preconditions.checkNotNull(eventSerializerType,
      "Event serializer cannot be empty, please specify in the configuration file");
  initializeSerializer(context, eventSerializerType);
  this.sinkCounter = new SinkCounter(this.getName());
}
Example #15
Source File: PulsarSink.java From pulsar-flume-ng-sink with Apache License 2.0
@Override
public synchronized void start() {
  try {
    log.info("start pulsar producer");
    initPulsarClient();
    initPulsarProducer();
    this.counter = new SinkCounter("flume-sink");
    super.start();
  } catch (Exception e) {
    log.error("init pulsar failed:{}", e.getMessage());
  }
}
Example #16
Source File: TestBucketWriter.java From mt-flume with Apache License 2.0
@Test
public void testIntervalRoller() throws IOException, InterruptedException {
  final int ROLL_INTERVAL = 1; // seconds
  final int NUM_EVENTS = 10;

  MockHDFSWriter hdfsWriter = new MockHDFSWriter();
  BucketWriter bucketWriter = new BucketWriter(ROLL_INTERVAL, 0, 0, 0, ctx,
      "/tmp", "file", "", ".tmp", null, null, SequenceFile.CompressionType.NONE,
      hdfsWriter, timedRollerPool, null,
      new SinkCounter("test-bucket-writer-" + System.currentTimeMillis()),
      0, null, null, 30000, Executors.newSingleThreadExecutor());

  Event e = EventBuilder.withBody("foo", Charsets.UTF_8);
  long startNanos = System.nanoTime();
  for (int i = 0; i < NUM_EVENTS - 1; i++) {
    bucketWriter.append(e);
  }

  // sleep to force a roll... wait 2x interval just to be sure
  Thread.sleep(2 * ROLL_INTERVAL * 1000L);

  // write one more event (to reopen a new file so we will roll again later)
  bucketWriter.append(e);

  long elapsedMillis = TimeUnit.MILLISECONDS.convert(
      System.nanoTime() - startNanos, TimeUnit.NANOSECONDS);
  long elapsedSeconds = elapsedMillis / 1000L;

  logger.info("Time elapsed: {} milliseconds", elapsedMillis);
  logger.info("Number of events written: {}", hdfsWriter.getEventsWritten());
  logger.info("Number of bytes written: {}", hdfsWriter.getBytesWritten());
  logger.info("Number of files opened: {}", hdfsWriter.getFilesOpened());
  logger.info("Number of files closed: {}", hdfsWriter.getFilesClosed());

  Assert.assertEquals("events written", NUM_EVENTS, hdfsWriter.getEventsWritten());
  Assert.assertEquals("bytes written", e.getBody().length * NUM_EVENTS,
      hdfsWriter.getBytesWritten());
  Assert.assertEquals("files opened", 2, hdfsWriter.getFilesOpened());

  // before auto-roll
  Assert.assertEquals("files closed", 1, hdfsWriter.getFilesClosed());

  logger.info("Waiting for roll...");
  Thread.sleep(2 * ROLL_INTERVAL * 1000L);

  logger.info("Number of files closed: {}", hdfsWriter.getFilesClosed());
  Assert.assertEquals("files closed", 2, hdfsWriter.getFilesClosed());
}
Example #17
Source File: HBaseSink.java From mt-flume with Apache License 2.0
@SuppressWarnings("unchecked") @Override public void configure(Context context){ tableName = context.getString(HBaseSinkConfigurationConstants.CONFIG_TABLE); String cf = context.getString( HBaseSinkConfigurationConstants.CONFIG_COLUMN_FAMILY); batchSize = context.getLong( HBaseSinkConfigurationConstants.CONFIG_BATCHSIZE, new Long(100)); serializerContext = new Context(); //If not specified, will use HBase defaults. eventSerializerType = context.getString( HBaseSinkConfigurationConstants.CONFIG_SERIALIZER); Preconditions.checkNotNull(tableName, "Table name cannot be empty, please specify in configuration file"); Preconditions.checkNotNull(cf, "Column family cannot be empty, please specify in configuration file"); //Check foe event serializer, if null set event serializer type if(eventSerializerType == null || eventSerializerType.isEmpty()) { eventSerializerType = "org.apache.flume.sink.hbase.SimpleHbaseEventSerializer"; logger.info("No serializer defined, Will use default"); } serializerContext.putAll(context.getSubProperties( HBaseSinkConfigurationConstants.CONFIG_SERIALIZER_PREFIX)); columnFamily = cf.getBytes(Charsets.UTF_8); try { Class<? extends HbaseEventSerializer> clazz = (Class<? extends HbaseEventSerializer>) Class.forName(eventSerializerType); serializer = clazz.newInstance(); serializer.configure(serializerContext); } catch (Exception e) { logger.error("Could not instantiate event serializer." , e); Throwables.propagate(e); } kerberosKeytab = context.getString(HBaseSinkConfigurationConstants.CONFIG_KEYTAB, ""); kerberosPrincipal = context.getString(HBaseSinkConfigurationConstants.CONFIG_PRINCIPAL, ""); enableWal = context.getBoolean(HBaseSinkConfigurationConstants .CONFIG_ENABLE_WAL, HBaseSinkConfigurationConstants.DEFAULT_ENABLE_WAL); logger.info("The write to WAL option is set to: " + String.valueOf(enableWal)); if(!enableWal) { logger.warn("HBase Sink's enableWal configuration is set to false. All " + "writes to HBase will have WAL disabled, and any data in the " + "memstore of this region in the Region Server could be lost!"); } sinkCounter = new SinkCounter(this.getName()); }
Example #18
Source File: SinkOfFlume.java From pulsar with Apache License 2.0
@Override
public synchronized void start() {
  records = new LinkedBlockingQueue<Map<String, Object>>();
  this.counter = new SinkCounter("flume-sink");
}
Example #19
Source File: DatasetSink.java From kite with Apache License 2.0
@Override
public void configure(Context context) {
  // initialize login credentials
  this.login = KerberosUtil.login(
      context.getString(DatasetSinkConstants.AUTH_PRINCIPAL),
      context.getString(DatasetSinkConstants.AUTH_KEYTAB));
  String effectiveUser = context.getString(DatasetSinkConstants.AUTH_PROXY_USER);
  if (effectiveUser != null) {
    this.login = KerberosUtil.proxyAs(effectiveUser, login);
  }

  String datasetURI = context.getString(
      DatasetSinkConstants.CONFIG_KITE_DATASET_URI);
  if (datasetURI != null) {
    this.target = URI.create(datasetURI);
    this.datasetName = uriToName(target);
  } else {
    String repositoryURI = context.getString(
        DatasetSinkConstants.CONFIG_KITE_REPO_URI);
    Preconditions.checkNotNull(repositoryURI, "Repository URI is missing");
    this.datasetName = context.getString(
        DatasetSinkConstants.CONFIG_KITE_DATASET_NAME);
    Preconditions.checkNotNull(datasetName, "Dataset name is missing");
    this.target = new URIBuilder(repositoryURI, URIBuilder.NAMESPACE_DEFAULT,
        datasetName).build();
  }
  this.setName(target.toString());

  // other configuration
  this.batchSize = context.getLong(
      DatasetSinkConstants.CONFIG_KITE_BATCH_SIZE,
      DatasetSinkConstants.DEFAULT_BATCH_SIZE);
  this.rollIntervalS = context.getInteger(
      DatasetSinkConstants.CONFIG_KITE_ROLL_INTERVAL,
      DatasetSinkConstants.DEFAULT_ROLL_INTERVAL);

  this.counter = new SinkCounter(datasetName);
}