Java Code Examples for org.apache.flume.Transaction#begin()
The following examples show how to use org.apache.flume.Transaction#begin().
The project and source file each example was taken from are noted above it.
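All of the examples follow the same transaction lifecycle: obtain a Transaction from the channel, call begin() before any put() or take(), then commit() on success or rollback() on failure, and always close() in a finally block. The standalone sketch below distills that pattern; the class name and event body are illustrative and not taken from any one example.

import org.apache.flume.Channel;
import org.apache.flume.Context;
import org.apache.flume.Transaction;
import org.apache.flume.channel.MemoryChannel;
import org.apache.flume.conf.Configurables;
import org.apache.flume.event.EventBuilder;

public class TransactionBeginSketch {
  public static void main(String[] args) {
    // Illustrative setup: a default-configured in-memory channel.
    Channel channel = new MemoryChannel();
    Configurables.configure(channel, new Context());
    channel.start();

    Transaction tx = channel.getTransaction();
    tx.begin();                  // must precede any put() or take()
    try {
      channel.put(EventBuilder.withBody("hello".getBytes()));
      tx.commit();               // make the put visible to takers
    } catch (RuntimeException e) {
      tx.rollback();             // undo all work done since begin()
      throw e;
    } finally {
      tx.close();                // always close, committed or not
    }
    channel.stop();
  }
}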
Example 1
Source File: TestThriftSource.java From mt-flume with Apache License 2.0
@Test
public void testAppend() throws Exception {
  client = RpcClientFactory.getThriftInstance(props);
  Context context = new Context();
  channel.configure(context);
  configureSource();
  context.put(ThriftSource.CONFIG_BIND, "0.0.0.0");
  context.put(ThriftSource.CONFIG_PORT, String.valueOf(port));
  Configurables.configure(source, context);
  source.start();
  for (int i = 0; i < 30; i++) {
    client.append(EventBuilder.withBody(String.valueOf(i).getBytes()));
  }
  Transaction transaction = channel.getTransaction();
  transaction.begin();
  for (int i = 0; i < 30; i++) {
    Event event = channel.take();
    Assert.assertNotNull(event);
    Assert.assertEquals(String.valueOf(i), new String(event.getBody()));
  }
  transaction.commit();
  transaction.close();
}
Example 2
Source File: StringSinkTests.java From pulsar with Apache License 2.0
@Test
public void TestOpenAndWriteSink() throws Exception {
  Map<String, Object> conf = Maps.newHashMap();
  StringSink stringSink = new StringSink();
  conf.put("name", "a1");
  conf.put("confFile", "./src/test/resources/flume/source.conf");
  conf.put("noReloadConf", false);
  conf.put("zkConnString", "");
  conf.put("zkBasePath", "");
  stringSink.open(conf, mockSinkContext);
  send(stringSink, 100);
  Thread.sleep(3 * 1000);
  Transaction transaction = channel.getTransaction();
  transaction.begin();
  Event event = channel.take();
  Assert.assertNotNull(event);
  Assert.assertNotNull(mockRecord);
  verify(mockRecord, times(100)).ack();
  transaction.commit();
  transaction.close();
}
Example 3
Source File: TestAsyncHBaseSink.java From mt-flume with Apache License 2.0
@Test
public void testOneEvent() throws Exception {
  testUtility.createTable(tableName.getBytes(), columnFamily.getBytes());
  deleteTable = true;
  AsyncHBaseSink sink = new AsyncHBaseSink(testUtility.getConfiguration());
  Configurables.configure(sink, ctx);
  Channel channel = new MemoryChannel();
  Configurables.configure(channel, ctx);
  sink.setChannel(channel);
  sink.start();
  Transaction tx = channel.getTransaction();
  tx.begin();
  Event e = EventBuilder.withBody(Bytes.toBytes(valBase));
  channel.put(e);
  tx.commit();
  tx.close();
  Assert.assertFalse(sink.isConfNull());
  sink.process();
  sink.stop();
  HTable table = new HTable(testUtility.getConfiguration(), tableName);
  byte[][] results = getResults(table, 1);
  byte[] out = results[0];
  Assert.assertArrayEquals(e.getBody(), out);
  out = results[1];
  Assert.assertArrayEquals(Longs.toByteArray(1), out);
}
Example 4
Source File: TestBasicChannelSemantics.java From mt-flume with Apache License 2.0
@Test
public void testRollback5() throws Exception {
  final Transaction transaction = channel.getTransaction();
  testIllegalState(new Runnable() {
    @Override
    public void run() {
      transaction.rollback();
    }
  });
  transaction.begin();
  testInterrupt(new Runnable() {
    @Override
    public void run() {
      transaction.rollback();
    }
  });
  testIllegalState(new Runnable() {
    @Override
    public void run() {
      transaction.rollback();
    }
  });
  transaction.close();
  testIllegalState(new Runnable() {
    @Override
    public void run() {
      transaction.rollback();
    }
  });
}
Example 5
Source File: TestFlumeFailoverTarget.java From datacollector with Apache License 2.0
@Test
public void testWriteStringRecordsFromJSON3() throws InterruptedException, StageException, IOException {
  DataGeneratorFormatConfig dataGeneratorFormatConfig = new DataGeneratorFormatConfig();
  dataGeneratorFormatConfig.textFieldPath = "/"; // MAP
  dataGeneratorFormatConfig.charset = "UTF-8";
  dataGeneratorFormatConfig.textEmptyLineIfNull = true;
  FlumeTarget flumeTarget = FlumeTestUtil.createFlumeTarget(
      FlumeTestUtil.createDefaultFlumeConfig(port, false),
      DataFormat.TEXT,
      dataGeneratorFormatConfig
  );
  TargetRunner targetRunner = new TargetRunner.Builder(FlumeDTarget.class, flumeTarget)
      .setOnRecordError(OnRecordError.TO_ERROR)
      .build();
  targetRunner.runInit();
  List<Record> logRecords = FlumeTestUtil.createJsonRecords();
  targetRunner.runWrite(logRecords);
  // All records must be sent to error
  Assert.assertEquals(logRecords.size(), targetRunner.getErrorRecords().size());
  targetRunner.runDestroy();
  Transaction transaction = ch.getTransaction();
  transaction.begin();
  Event event = ch.take();
  Assert.assertNull(event);
  transaction.commit();
  transaction.close();
}
Example 6
Source File: KafkaSink.java From ingestion with Apache License 2.0
@Override
public Status process() throws EventDeliveryException {
  Channel channel = getChannel();
  Transaction tx = channel.getTransaction();
  try {
    tx.begin();
    Event event = channel.take();
    if (event == null) {
      // Empty channel: commit the empty transaction and report READY
      tx.commit();
      return Status.READY;
    }
    String data = null;
    if (writeBody) {
      data = new String(event.getBody());
    } else {
      data = mapper.writeValueAsString(event.getHeaders());
    }
    producer.send(new KeyedMessage<String, String>(topic, data));
    tx.commit();
    return Status.READY;
  } catch (Exception e) {
    try {
      tx.rollback();
      return Status.BACKOFF;
    } catch (Exception e2) {
      log.error("Rollback Exception:{}", e2);
    }
    log.error("KafkaSink Exception:{}", e);
    return Status.BACKOFF;
  } finally {
    tx.close();
  }
}
Example 7
Source File: DruidSink.java From ingestion with Apache License 2.0
@Override
public Status process() throws EventDeliveryException {
  List<Event> events;
  List<Map<String, Object>> parsedEvents;
  Status status = Status.BACKOFF;
  Transaction transaction = this.getChannel().getTransaction();
  try {
    transaction.begin();
    events = takeEventsFromChannel(this.getChannel(), batchSize);
    status = Status.READY;
    if (!events.isEmpty()) {
      updateSinkCounters(events);
      parsedEvents = eventParser.parse(events);
      sendEvents(parsedEvents);
      sinkCounter.addToEventDrainSuccessCount(events.size());
    } else {
      sinkCounter.incrementBatchEmptyCount();
    }
    transaction.commit();
    status = Status.READY;
  } catch (ChannelException e) {
    e.printStackTrace();
    transaction.rollback();
    status = Status.BACKOFF;
    this.sinkCounter.incrementConnectionFailedCount();
  } catch (Throwable t) {
    t.printStackTrace();
    transaction.rollback();
    status = Status.BACKOFF;
    if (t instanceof Error) {
      LOG.error(t.getMessage());
      throw new DruidSinkException("An error occurred during processing events to be stored in druid", t);
    }
  } finally {
    transaction.close();
  }
  return status;
}
Example 8
Source File: RegexEventSerializerIT.java From phoenix with Apache License 2.0
@Test
public void testMissingColumnsInEvent() throws EventDeliveryException, SQLException {
  final String fullTableName = "FLUME_TEST";
  initSinkContextWithDefaults(fullTableName);
  sink = new PhoenixSink();
  Configurables.configure(sink, sinkContext);
  assertEquals(LifecycleState.IDLE, sink.getLifecycleState());
  final Channel channel = this.initChannel();
  sink.setChannel(channel);
  sink.start();
  final String eventBody = "val1";
  final Event event = EventBuilder.withBody(Bytes.toBytes(eventBody));
  // put event in channel
  Transaction transaction = channel.getTransaction();
  transaction.begin();
  channel.put(event);
  transaction.commit();
  transaction.close();
  sink.process();
  int rowsInDb = countRows(fullTableName);
  assertEquals(0, rowsInDb);
  sink.stop();
  assertEquals(LifecycleState.STOP, sink.getLifecycleState());
}
Example 9
Source File: TestHTTPSource.java From mt-flume with Apache License 2.0
@Test
public void testSimple() throws IOException, InterruptedException {
  StringEntity input = new StringEntity(
      "[{\"headers\":{\"a\": \"b\"},\"body\": \"random_body\"},"
      + "{\"headers\":{\"e\": \"f\"},\"body\": \"random_body2\"}]");
  // if we do not set the content type to JSON, the client will use
  // ISO-8859-1 as the charset. JSON standard does not support this.
  input.setContentType("application/json");
  postRequest.setEntity(input);
  HttpResponse response = httpClient.execute(postRequest);
  Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
  Transaction tx = channel.getTransaction();
  tx.begin();
  Event e = channel.take();
  Assert.assertNotNull(e);
  Assert.assertEquals("b", e.getHeaders().get("a"));
  Assert.assertEquals("random_body", new String(e.getBody(), "UTF-8"));
  e = channel.take();
  Assert.assertNotNull(e);
  Assert.assertEquals("f", e.getHeaders().get("e"));
  Assert.assertEquals("random_body2", new String(e.getBody(), "UTF-8"));
  tx.commit();
  tx.close();
}
Example 10
Source File: TestBasicChannelSemantics.java From mt-flume with Apache License 2.0
@Test
public void testTake3() throws Exception {
  Transaction transaction = channel.getTransaction();
  transaction.begin();
  channel.take();
  final Transaction finalTransaction = transaction;
  testChannelException(new Runnable() {
    @Override
    public void run() {
      finalTransaction.commit();
    }
  });
  transaction.rollback();
  testIllegalState(new Runnable() {
    @Override
    public void run() {
      channel.take();
    }
  });
  transaction.close();
  testIllegalState(new Runnable() {
    @Override
    public void run() {
      channel.take();
    }
  });
}
Example 11
Source File: TestElasticSearchSink.java From ingestion with Apache License 2.0
@Test
public void shouldAllowCustomElasticSearchIndexRequestBuilderFactory() throws Exception {
  parameters.put(SERIALIZER, CustomElasticSearchIndexRequestBuilderFactory.class.getName());
  fixture.configure(new Context(parameters));
  Channel channel = bindAndStartChannel(fixture);
  Transaction tx = channel.getTransaction();
  tx.begin();
  String body = "{ foo: \"bar\" }";
  Event event = EventBuilder.withBody(body.getBytes());
  channel.put(event);
  tx.commit();
  tx.close();
  fixture.process();
  fixture.stop();
  assertEquals(fixture.getIndexName() + "-05_17_36_789",
      CustomElasticSearchIndexRequestBuilderFactory.actualIndexName);
  assertEquals(fixture.getIndexType(),
      CustomElasticSearchIndexRequestBuilderFactory.actualIndexType);
  assertArrayEquals(event.getBody(),
      CustomElasticSearchIndexRequestBuilderFactory.actualEventBody);
  assertTrue(CustomElasticSearchIndexRequestBuilderFactory.hasContext);
}
Example 12
Source File: TestFlumeFailoverTarget.java From datacollector with Apache License 2.0
@Test
public void testWriteAvroRecordsDropSchema() throws InterruptedException, StageException, IOException {
  DataGeneratorFormatConfig dataGeneratorFormatConfig = new DataGeneratorFormatConfig();
  dataGeneratorFormatConfig.avroSchema = SdcAvroTestUtil.AVRO_SCHEMA1;
  dataGeneratorFormatConfig.avroSchemaSource = INLINE;
  dataGeneratorFormatConfig.includeSchema = false;
  dataGeneratorFormatConfig.avroCompression = AvroCompression.NULL;
  FlumeTarget flumeTarget = FlumeTestUtil.createFlumeTarget(
      FlumeTestUtil.createDefaultFlumeConfig(port, false),
      DataFormat.AVRO,
      dataGeneratorFormatConfig
  );
  TargetRunner targetRunner = new TargetRunner.Builder(FlumeDTarget.class, flumeTarget).build();
  targetRunner.runInit();
  List<Record> records = SdcAvroTestUtil.getRecords1();
  targetRunner.runWrite(records);
  targetRunner.runDestroy();
  List<GenericRecord> genericRecords = new ArrayList<>();
  // Reader schema argument is optional
  DatumReader<GenericRecord> datumReader = new GenericDatumReader<>();
  datumReader.setSchema(new Schema.Parser().parse(SdcAvroTestUtil.AVRO_SCHEMA1));
  Transaction transaction = ch.getTransaction();
  transaction.begin();
  Event event = ch.take();
  while (event != null) {
    BinaryDecoder decoder = DecoderFactory.get().binaryDecoder(event.getBody(), null);
    GenericRecord read = datumReader.read(null, decoder);
    genericRecords.add(read);
    event = ch.take();
  }
  transaction.commit();
  transaction.close();
  Assert.assertEquals(3, genericRecords.size());
  SdcAvroTestUtil.compare1(genericRecords);
}
Example 13
Source File: MongoSinkTest.java From ingestion with Apache License 2.0
@Test(expected = MongoSinkException.class)
public void errorOnProcess() throws Exception {
  DBCollection mockedCollection = Mockito.mock(DBCollection.class);
  Mockito.when(mockedCollection.save(Mockito.any(DBObject.class))).thenThrow(Error.class);
  setField(mongoSink, "mongoDefaultCollection", mockedCollection);
  Transaction tx = channel.getTransaction();
  tx.begin();
  Event event = EventBuilder.withBody("{ }".getBytes(Charsets.UTF_8));
  channel.put(event);
  tx.commit();
  tx.close();
  mongoSink.process();
}
Example 14
Source File: TestHBaseSink.java From mt-flume with Apache License 2.0
/**
 * This test must run last - it shuts down the minicluster :D
 * @throws Exception
 */
@Ignore("For dev builds only: "
    + "This test takes too long, and this has to be run after all other "
    + "tests, since it shuts down the minicluster. "
    + "Comment out all other tests "
    + "and uncomment this annotation to run this test.")
@Test(expected = EventDeliveryException.class)
public void testHBaseFailure() throws Exception {
  ctx.put("batchSize", "2");
  testUtility.createTable(tableName.getBytes(), columnFamily.getBytes());
  HBaseSink sink = new HBaseSink(testUtility.getConfiguration());
  Configurables.configure(sink, ctx);
  // Reset the context to a higher batchSize
  ctx.put("batchSize", "100");
  Channel channel = new MemoryChannel();
  Configurables.configure(channel, new Context());
  sink.setChannel(channel);
  sink.start();
  Transaction tx = channel.getTransaction();
  tx.begin();
  for (int i = 0; i < 3; i++) {
    Event e = EventBuilder.withBody(Bytes.toBytes(valBase + "-" + i));
    channel.put(e);
  }
  tx.commit();
  tx.close();
  sink.process();
  HTable table = new HTable(testUtility.getConfiguration(), tableName);
  byte[][] results = getResults(table, 2);
  byte[] out;
  int found = 0;
  for (int i = 0; i < 2; i++) {
    for (int j = 0; j < 2; j++) {
      if (Arrays.equals(results[j], Bytes.toBytes(valBase + "-" + i))) {
        found++;
        break;
      }
    }
  }
  Assert.assertEquals(2, found);
  out = results[2];
  Assert.assertArrayEquals(Longs.toByteArray(2), out);
  testUtility.shutdownMiniCluster();
  sink.process();
  sink.stop();
}
Example 15
Source File: TestHBaseSink.java From mt-flume with Apache License 2.0
@Test
public void testMultipleBatches() throws Exception {
  testUtility.createTable(tableName.getBytes(), columnFamily.getBytes());
  ctx.put("batchSize", "2");
  HBaseSink sink = new HBaseSink(testUtility.getConfiguration());
  Configurables.configure(sink, ctx);
  // Reset the context to a higher batchSize
  ctx.put("batchSize", "100");
  Channel channel = new MemoryChannel();
  Configurables.configure(channel, new Context());
  sink.setChannel(channel);
  sink.start();
  Transaction tx = channel.getTransaction();
  tx.begin();
  for (int i = 0; i < 3; i++) {
    Event e = EventBuilder.withBody(Bytes.toBytes(valBase + "-" + i));
    channel.put(e);
  }
  tx.commit();
  tx.close();
  int count = 0;
  Status status = Status.READY;
  while (status != Status.BACKOFF) {
    count++;
    status = sink.process();
  }
  sink.stop();
  Assert.assertEquals(2, count);
  HTable table = new HTable(testUtility.getConfiguration(), tableName);
  byte[][] results = getResults(table, 3);
  byte[] out;
  int found = 0;
  for (int i = 0; i < 3; i++) {
    for (int j = 0; j < 3; j++) {
      if (Arrays.equals(results[j], Bytes.toBytes(valBase + "-" + i))) {
        found++;
        break;
      }
    }
  }
  Assert.assertEquals(3, found);
  out = results[3];
  Assert.assertArrayEquals(Longs.toByteArray(3), out);
  testUtility.deleteTable(tableName.getBytes());
}
Example 16
Source File: TestFlumeLoadBalancingTarget.java From datacollector with Apache License 2.0
@Test
public void testWriteStringRecordsRoundRobin() throws StageException {
  DataGeneratorFormatConfig dataGeneratorFormatConfig = new DataGeneratorFormatConfig();
  dataGeneratorFormatConfig.textFieldPath = "/";
  dataGeneratorFormatConfig.textEmptyLineIfNull = false;
  FlumeTarget flumeTarget = FlumeTestUtil.createFlumeTarget(
      FlumeTestUtil.createFlumeConfig(
          false,                           // backOff
          100,                             // batchSize
          ClientType.AVRO_LOAD_BALANCING,
          2000,                            // connection timeout
          flumeHosts,
          HostSelectionStrategy.ROUND_ROBIN,
          -1,                              // maxBackOff
          1,                               // maxRetryAttempts
          2000,                            // requestTimeout
          false,                           // singleEventPerBatch
          0
      ),
      DataFormat.TEXT,
      dataGeneratorFormatConfig
  );
  TargetRunner targetRunner = new TargetRunner.Builder(FlumeDTarget.class, flumeTarget).build();
  targetRunner.runInit();
  List<List<Record>> logRecords = new ArrayList<>(NUM_HOSTS);
  for (int i = 0; i < NUM_HOSTS; i++) {
    logRecords.add(FlumeTestUtil.createStringRecords());
  }
  for (int i = 0; i < NUM_HOSTS; i++) {
    targetRunner.runWrite(logRecords.get(i));
  }
  targetRunner.runDestroy();
  for (int i = 0; i < logRecords.size(); i++) {
    Channel channel = chs.get(i % NUM_HOSTS);
    List<Record> records = logRecords.get(i);
    for (int j = 0; j < records.size(); j++) {
      Transaction transaction = channel.getTransaction();
      transaction.begin();
      Event event = channel.take();
      Assert.assertNotNull(event);
      Assert.assertEquals(records.get(j).get().getValueAsString(), new String(event.getBody()).trim());
      Assert.assertTrue(event.getHeaders().containsKey("charset"));
      Assert.assertEquals("UTF-8", event.getHeaders().get("charset"));
      transaction.commit();
      transaction.close();
    }
  }
}
Example 17
Source File: JDBCSinkTest.java From ingestion with Apache License 2.0
@Test
public void mappedWithH2() throws Exception {
  Class.forName("org.h2.Driver").newInstance();
  Connection conn = DriverManager.getConnection("jdbc:h2:/tmp/jdbcsink_test");
  conn.prepareStatement("DROP TABLE public.test IF EXISTS;").execute();
  conn.prepareStatement("CREATE TABLE public.test "
      + "(myInteger INTEGER, myString VARCHAR, myId BIGINT AUTO_INCREMENT PRIMARY KEY);").execute();
  conn.commit();
  conn.close();
  Context ctx = new Context();
  ctx.put("driver", "org.h2.Driver");
  ctx.put("connectionString", "jdbc:h2:/tmp/jdbcsink_test");
  ctx.put("table", "test");
  ctx.put("sqlDialect", "H2");
  ctx.put("batchSize", "1");
  JDBCSink jdbcSink = new JDBCSink();
  Configurables.configure(jdbcSink, ctx);
  Context channelContext = new Context();
  channelContext.put("capacity", "10000");
  channelContext.put("transactionCapacity", "200");
  Channel channel = new MemoryChannel();
  channel.setName("junitChannel");
  Configurables.configure(channel, channelContext);
  jdbcSink.setChannel(channel);
  channel.start();
  jdbcSink.start();
  Transaction tx = channel.getTransaction();
  tx.begin();
  Map<String, String> headers = new HashMap<String, String>();
  headers.put("myString", "bar"); // Overwrites the value defined in JSON body
  headers.put("myInteger", "64");
  headers.put("myBoolean", "true");
  headers.put("myDouble", "1.0");
  headers.put("myNull", "foobar");
  Date myDate = new Date();
  headers.put("myDate", Long.toString(myDate.getTime()));
  headers.put("myString2", "baz");
  Event event = EventBuilder.withBody(new byte[0], headers);
  channel.put(event);
  tx.commit();
  tx.close();
  jdbcSink.process();
  jdbcSink.stop();
  channel.stop();
  conn = DriverManager.getConnection("jdbc:h2:/tmp/jdbcsink_test");
  ResultSet resultSet = conn.prepareStatement("SELECT * FROM public.test").executeQuery();
  resultSet.next();
  assertThat(resultSet.getInt("myInteger")).isEqualTo(64);
  assertThat(resultSet.getString("myString")).isEqualTo("bar");
  conn.close();
}
Example 18
Source File: JDBCSinkTest.java From ingestion with Apache License 2.0
/**
 * Sample code to validate postgresql integration with JDBC sink.
 * Ignored until we get a Docker Postgresql and move to integration test.
 * @throws Exception
 */
@Test
@Ignore
public void templateWithPostgres() throws Exception {
  Class.forName("org.postgresql.Driver").newInstance();
  Connection connTruncate = DriverManager.getConnection(
      "jdbc:postgresql://10.200.0.126:5432/test_erige?user=postgres&password=");
  connTruncate.prepareStatement("TRUNCATE tc40;").execute();
  //conn.prepareStatement("CREATE TABLE tc40 (myInteger INTEGER, myString VARCHAR, myId BIGINT AUTO_INCREMENT "
  //    + "PRIMARY KEY);").execute();
  //connTruncate.commit();
  connTruncate.close();
  Context ctx = new Context();
  ctx.put("driver", "org.postgresql.Driver");
  ctx.put("connectionString", "jdbc:postgresql://10.200.0.126:5432/test_erige?user=postgres&password=");
  ctx.put("sqlDialect", "POSTGRES");
  ctx.put("table", "tc40");
  ctx.put("username", "postgres");
  ctx.put("batchSize", "1");
  ctx.put("sql", "INSERT INTO \"tc40\" (\"arn\", \"account_number\") VALUES "
      + "(${header.arn:varchar}, ${header.account_number:varchar})");
  JDBCSink jdbcSink = new JDBCSink();
  Configurables.configure(jdbcSink, ctx);
  Context channelContext = new Context();
  channelContext.put("capacity", "10000");
  channelContext.put("transactionCapacity", "200");
  Channel channel = new MemoryChannel();
  channel.setName("junitChannel");
  Configurables.configure(channel, channelContext);
  jdbcSink.setChannel(channel);
  channel.start();
  jdbcSink.start();
  Transaction tx = channel.getTransaction();
  tx.begin();
  Map<String, String> headers = new HashMap<String, String>();
  headers.put("arn", "bar"); // Overwrites the value defined in JSON body
  headers.put("account_number", "account number");
  headers.put("dsadas", "dsadasdas");
  Event event = EventBuilder.withBody(new byte[0], headers);
  channel.put(event);
  tx.commit();
  tx.close();
  jdbcSink.process();
  jdbcSink.stop();
  channel.stop();
  //Connection conn = DriverManager.getConnection("jdbc:h2:/tmp/jdbcsink_test");
  Connection conn = DriverManager.getConnection(
      "jdbc:postgresql://10.200.0.126:5432/test_erige?user=postgres&password=");
  ResultSet rs = conn.prepareStatement("SELECT count(*) AS count FROM tc40").executeQuery();
  rs.next();
  assertThat(rs.getInt("count")).isEqualTo(1);
  rs = conn.prepareStatement("SELECT * FROM tc40").executeQuery();
  rs.next();
  //for (int i = 1; i <= 3; i++) {
  //  System.out.println(rs.getString(i));
  //}
  assertThat(rs.getString("arn")).isEqualTo("bar");
  conn.close();
}
Example 19
Source File: TestBasicChannelSemantics.java From mt-flume with Apache License 2.0
@Test
public void testPut1() throws Exception {
  testIllegalState(new Runnable() {
    @Override
    public void run() {
      channel.put(events.get(0));
    }
  });
  Transaction transaction = channel.getTransaction();
  testIllegalState(new Runnable() {
    @Override
    public void run() {
      channel.put(events.get(0));
    }
  });
  transaction.begin();
  channel.put(events.get(0));
  testIllegalArgument(new Runnable() {
    @Override
    public void run() {
      channel.put(null);
    }
  });
  testExceptions(new Runnable() {
    @Override
    public void run() {
      channel.put(events.get(0));
    }
  });
  transaction.commit();
  testIllegalState(new Runnable() {
    @Override
    public void run() {
      channel.put(events.get(0));
    }
  });
  transaction.close();
  testIllegalState(new Runnable() {
    @Override
    public void run() {
      channel.put(events.get(0));
    }
  });
}
Example 20
Source File: TestHDFSEventSink.java From mt-flume with Apache License 2.0
@Test
public void testAvroAppend() throws InterruptedException, LifecycleException,
    EventDeliveryException, IOException {
  LOG.debug("Starting...");
  final long rollCount = 3;
  final long batchSize = 2;
  final String fileName = "FlumeData";
  String newPath = testPath + "/singleTextBucket";
  int totalEvents = 0;
  int i = 1, j = 1;

  // clear the test directory
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(conf);
  Path dirPath = new Path(newPath);
  fs.delete(dirPath, true);
  fs.mkdirs(dirPath);

  Context context = new Context();
  // context.put("hdfs.path", testPath + "/%Y-%m-%d/%H");
  context.put("hdfs.path", newPath);
  context.put("hdfs.filePrefix", fileName);
  context.put("hdfs.rollCount", String.valueOf(rollCount));
  context.put("hdfs.batchSize", String.valueOf(batchSize));
  context.put("hdfs.writeFormat", "Text");
  context.put("hdfs.fileType", "DataStream");
  context.put("serializer", "AVRO_EVENT");
  Configurables.configure(sink, context);

  Channel channel = new MemoryChannel();
  Configurables.configure(channel, context);
  sink.setChannel(channel);
  sink.start();

  Calendar eventDate = Calendar.getInstance();
  List<String> bodies = Lists.newArrayList();

  // push the event batches into channel
  for (i = 1; i < 4; i++) {
    Transaction txn = channel.getTransaction();
    txn.begin();
    for (j = 1; j <= batchSize; j++) {
      Event event = new SimpleEvent();
      eventDate.clear();
      eventDate.set(2011, i, i, i, 0); // yy mm dd
      event.getHeaders().put("timestamp", String.valueOf(eventDate.getTimeInMillis()));
      event.getHeaders().put("hostname", "Host" + i);
      String body = "Test." + i + "." + j;
      event.setBody(body.getBytes());
      bodies.add(body);
      channel.put(event);
      totalEvents++;
    }
    txn.commit();
    txn.close();
    // execute sink to process the events
    sink.process();
  }
  sink.stop();

  // loop through all the files generated and check their contents
  FileStatus[] dirStat = fs.listStatus(dirPath);
  Path[] fList = FileUtil.stat2Paths(dirStat);

  // check that the roll happened correctly for the given data
  long expectedFiles = totalEvents / rollCount;
  if (totalEvents % rollCount > 0) {
    expectedFiles++;
  }
  Assert.assertEquals("num files wrong, found: " + Lists.newArrayList(fList),
      expectedFiles, fList.length);
  verifyOutputAvroFiles(fs, conf, dirPath.toUri().getPath(), fileName, bodies);
}