Java Code Examples for org.apache.flume.Transaction#commit()
The following examples show how to use org.apache.flume.Transaction#commit(). Each example notes the source file and project it was taken from.
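Every example on this page follows the same transactional contract: each put() or take() on a channel happens inside a Transaction, commit() makes the staged operations permanent, rollback() undoes them on failure, and close() always runs afterwards. As a minimal sketch of that pattern (assuming a locally configured MemoryChannel; any org.apache.flume.Channel behaves the same way):

Channel channel = new MemoryChannel();
Configurables.configure(channel, new Context());
Transaction txn = channel.getTransaction();
txn.begin();
try {
  // Stage one event; it becomes visible to consumers only after commit().
  Event event = EventBuilder.withBody("hello flume".getBytes());
  channel.put(event);
  txn.commit();
} catch (Throwable t) {
  // Undo the staged put; the channel is left unchanged.
  txn.rollback();
  if (t instanceof Error) {
    throw (Error) t;
  }
} finally {
  // Always close the transaction, whether it committed or rolled back.
  txn.close();
}

The examples below exercise exactly this sequence, both from test code and from sink implementations.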
Example 1
Source File: TestAsyncHBaseSink.java (mt-flume, Apache License 2.0)
@Test(expected = EventDeliveryException.class)
public void testTimeOut() throws Exception {
  testUtility.createTable(tableName.getBytes(), columnFamily.getBytes());
  deleteTable = true;
  AsyncHBaseSink sink = new AsyncHBaseSink(testUtility.getConfiguration(), true);
  Configurables.configure(sink, ctx);
  Channel channel = new MemoryChannel();
  Configurables.configure(channel, ctx);
  sink.setChannel(channel);
  sink.start();
  Transaction tx = channel.getTransaction();
  tx.begin();
  for (int i = 0; i < 3; i++) {
    Event e = EventBuilder.withBody(Bytes.toBytes(valBase + "-" + i));
    channel.put(e);
  }
  tx.commit();
  tx.close();
  Assert.assertFalse(sink.isConfNull());
  sink.process();
  Assert.fail();
}
Example 2
Source File: TestMemoryChannel.java (mt-flume, Apache License 2.0)
@Test
public void testNullEmptyEvent() {
  Context context = new Context();
  Map<String, String> parms = new HashMap<String, String>();
  parms.put("byteCapacity", "2000");
  parms.put("byteCapacityBufferPercentage", "20");
  context.putAll(parms);
  Configurables.configure(channel, context);
  Transaction tx = channel.getTransaction();
  tx.begin();
  // This line would cause a NPE without FLUME-1622.
  channel.put(EventBuilder.withBody(null));
  tx.commit();
  tx.close();
  tx = channel.getTransaction();
  tx.begin();
  channel.put(EventBuilder.withBody(new byte[0]));
  tx.commit();
  tx.close();
}
Example 3
Source File: TestMemoryChannel.java (mt-flume, Apache License 2.0)
@Test
public void testCapacityBufferEmptyingAfterRollback() {
  Context context = new Context();
  Map<String, String> parms = new HashMap<String, String>();
  parms.put("capacity", "3");
  parms.put("transactionCapacity", "3");
  context.putAll(parms);
  Configurables.configure(channel, context);
  Transaction tx = channel.getTransaction();
  tx.begin();
  channel.put(EventBuilder.withBody("test".getBytes()));
  channel.put(EventBuilder.withBody("test".getBytes()));
  channel.put(EventBuilder.withBody("test".getBytes()));
  tx.rollback();
  tx.close();
  tx = channel.getTransaction();
  tx.begin();
  channel.put(EventBuilder.withBody("test".getBytes()));
  channel.put(EventBuilder.withBody("test".getBytes()));
  channel.put(EventBuilder.withBody("test".getBytes()));
  tx.commit();
  tx.close();
}
Example 4
Source File: TestThriftSource.java (mt-flume, Apache License 2.0)
@Test
public void testAppend() throws Exception {
  client = RpcClientFactory.getThriftInstance(props);
  Context context = new Context();
  channel.configure(context);
  configureSource();
  context.put(ThriftSource.CONFIG_BIND, "0.0.0.0");
  context.put(ThriftSource.CONFIG_PORT, String.valueOf(port));
  Configurables.configure(source, context);
  source.start();
  for (int i = 0; i < 30; i++) {
    client.append(EventBuilder.withBody(String.valueOf(i).getBytes()));
  }
  Transaction transaction = channel.getTransaction();
  transaction.begin();
  for (int i = 0; i < 30; i++) {
    Event event = channel.take();
    Assert.assertNotNull(event);
    Assert.assertEquals(String.valueOf(i), new String(event.getBody()));
  }
  transaction.commit();
  transaction.close();
}
Example 5
Source File: TestElasticSearchSink.java (mt-flume, Apache License 2.0)
@Test
public void shouldIndexOneEvent() throws Exception {
  Configurables.configure(fixture, new Context(parameters));
  Channel channel = bindAndStartChannel(fixture);
  Transaction tx = channel.getTransaction();
  tx.begin();
  Event event = EventBuilder.withBody("event #1 or 1".getBytes());
  channel.put(event);
  tx.commit();
  tx.close();
  fixture.process();
  fixture.stop();
  client.admin().indices()
      .refresh(Requests.refreshRequest(timestampedIndexName)).actionGet();
  assertMatchAllQuery(1, event);
  assertBodyQuery(1, event);
}
Example 6
Source File: TestLog4jAppenderWithAvro.java (mt-flume, Apache License 2.0)
@Test
public void testAvroGeneric() throws IOException {
  loadProperties("flume-log4jtest-avro-generic.properties");
  PropertyConfigurator.configure(props);
  Logger logger = LogManager.getLogger(TestLog4jAppenderWithAvro.class);
  String msg = "This is log message number " + String.valueOf(0);
  Schema schema = new Schema.Parser().parse(
      getClass().getClassLoader().getResource("myrecord.avsc").openStream());
  GenericRecordBuilder builder = new GenericRecordBuilder(schema);
  GenericRecord record = builder.set("message", msg).build();
  logger.info(record);
  Transaction transaction = ch.getTransaction();
  transaction.begin();
  Event event = ch.take();
  Assert.assertNotNull(event);
  GenericDatumReader<GenericRecord> reader = new GenericDatumReader<GenericRecord>(schema);
  BinaryDecoder decoder = DecoderFactory.get().binaryDecoder(event.getBody(), null);
  GenericRecord recordFromEvent = reader.read(null, decoder);
  Assert.assertEquals(msg, recordFromEvent.get("message").toString());
  Map<String, String> hdrs = event.getHeaders();
  Assert.assertNull(hdrs.get(Log4jAvroHeaders.MESSAGE_ENCODING.toString()));
  Assert.assertEquals("Schema URL should be set", "file:///tmp/myrecord.avsc",
      hdrs.get(Log4jAvroHeaders.AVRO_SCHEMA_URL.toString()));
  Assert.assertNull("Schema string should not be set",
      hdrs.get(Log4jAvroHeaders.AVRO_SCHEMA_LITERAL.toString()));
  transaction.commit();
  transaction.close();
}
Example 7
Source File: TestHBaseSink.java (mt-flume, Apache License 2.0)
@Test
public void testThreeEvents() throws Exception {
  testUtility.createTable(tableName.getBytes(), columnFamily.getBytes());
  HBaseSink sink = new HBaseSink(testUtility.getConfiguration());
  Configurables.configure(sink, ctx);
  Channel channel = new MemoryChannel();
  Configurables.configure(channel, new Context());
  sink.setChannel(channel);
  sink.start();
  Transaction tx = channel.getTransaction();
  tx.begin();
  for (int i = 0; i < 3; i++) {
    Event e = EventBuilder.withBody(Bytes.toBytes(valBase + "-" + i));
    channel.put(e);
  }
  tx.commit();
  tx.close();
  sink.process();
  sink.stop();
  HTable table = new HTable(testUtility.getConfiguration(), tableName);
  byte[][] results = getResults(table, 3);
  byte[] out;
  int found = 0;
  for (int i = 0; i < 3; i++) {
    for (int j = 0; j < 3; j++) {
      if (Arrays.equals(results[j], Bytes.toBytes(valBase + "-" + i))) {
        found++;
        break;
      }
    }
  }
  Assert.assertEquals(3, found);
  out = results[3];
  Assert.assertArrayEquals(Longs.toByteArray(3), out);
  testUtility.deleteTable(tableName.getBytes());
}
Example 8
Source File: TestLoadBalancingLog4jAppender.java (mt-flume, Apache License 2.0)
private void sendAndAssertFail() throws IOException {
  int level = 20000;
  String msg = "This is log message number" + String.valueOf(level);
  fixture.log(Level.toLevel(level), msg);
  Transaction transaction = ch.getTransaction();
  transaction.begin();
  Event event = ch.take();
  Assert.assertNull(event);
  transaction.commit();
  transaction.close();
}
Example 9
Source File: TestMemoryChannel.java (mt-flume, Apache License 2.0)
@Test(expected = ChannelException.class)
public void testByteCapacityOverload() {
  Context context = new Context();
  Map<String, String> parms = new HashMap<String, String>();
  parms.put("byteCapacity", "2000");
  parms.put("byteCapacityBufferPercentage", "20");
  context.putAll(parms);
  Configurables.configure(channel, context);
  byte[] eventBody = new byte[405];
  Transaction transaction = channel.getTransaction();
  transaction.begin();
  channel.put(EventBuilder.withBody(eventBody));
  channel.put(EventBuilder.withBody(eventBody));
  channel.put(EventBuilder.withBody(eventBody));
  transaction.commit();
  transaction.close();
  transaction = channel.getTransaction();
  transaction.begin();
  channel.put(EventBuilder.withBody(eventBody));
  channel.put(EventBuilder.withBody(eventBody));
  // this should kill it
  transaction.commit();
  Assert.fail();
}
Example 10
Source File: TestLoadBalancingLog4jAppender.java (mt-flume, Apache License 2.0)
private void sendAndAssertMessages(int numberOfMsgs) throws IOException {
  for (int count = 0; count < numberOfMsgs; count++) {
    int level = count % 5;
    String msg = "This is log message number" + String.valueOf(count);
    fixture.log(Level.toLevel(level), msg);
    Transaction transaction = ch.getTransaction();
    transaction.begin();
    Event event = ch.take();
    Assert.assertNotNull(event);
    Assert.assertEquals(new String(event.getBody(), "UTF8"), msg);
    Map<String, String> hdrs = event.getHeaders();
    Assert.assertNotNull(hdrs.get(Log4jAvroHeaders.TIMESTAMP.toString()));
    Assert.assertEquals(Level.toLevel(level),
        Level.toLevel(hdrs.get(Log4jAvroHeaders.LOG_LEVEL.toString())));
    Assert.assertEquals(fixture.getName(), hdrs.get(Log4jAvroHeaders.LOGGER_NAME.toString()));
    Assert.assertEquals("UTF8", hdrs.get(Log4jAvroHeaders.MESSAGE_ENCODING.toString()));
    // To confirm on console we actually got the body
    System.out.println("Got body: " + new String(event.getBody(), "UTF8"));
    transaction.commit();
    transaction.close();
  }
}
Example 11
Source File: TestMemoryChannel.java (mt-flume, Apache License 2.0)
@Test
public void testCapacityBufferEmptyingAfterTakeCommit() {
  Context context = new Context();
  Map<String, String> parms = new HashMap<String, String>();
  parms.put("capacity", "3");
  parms.put("transactionCapacity", "3");
  context.putAll(parms);
  Configurables.configure(channel, context);
  Transaction tx = channel.getTransaction();
  tx.begin();
  channel.put(EventBuilder.withBody("test".getBytes()));
  channel.put(EventBuilder.withBody("test".getBytes()));
  channel.put(EventBuilder.withBody("test".getBytes()));
  tx.commit();
  tx.close();
  tx = channel.getTransaction();
  tx.begin();
  channel.take();
  channel.take();
  tx.commit();
  tx.close();
  tx = channel.getTransaction();
  tx.begin();
  channel.put(EventBuilder.withBody("test".getBytes()));
  channel.put(EventBuilder.withBody("test".getBytes()));
  tx.commit();
  tx.close();
}
Example 12
Source File: TestMemoryChannelTransaction.java (mt-flume, Apache License 2.0)
@Ignore("BasicChannelSemantics doesn't support re-entrant transactions") @Test public void testReEntTxn() throws InterruptedException, EventDeliveryException { Event event, event2; Context context = new Context(); int putCounter = 0; context.put("keep-alive", "1"); Configurables.configure(channel, context); Transaction transaction = channel.getTransaction(); Assert.assertNotNull(transaction); transaction.begin(); // first begin for (putCounter = 0; putCounter < 10; putCounter++) { transaction.begin(); // inner begin event = EventBuilder.withBody(("test event" + putCounter).getBytes()); channel.put(event); transaction.commit(); // inner commit } transaction.commit(); transaction.close(); transaction = channel.getTransaction(); Assert.assertNotNull(transaction); transaction.begin(); for (int i = 0; i < 10; i++) { event2 = channel.take(); Assert.assertNotNull("lost an event", event2); Assert.assertArrayEquals(event2.getBody(), ("test event" + i).getBytes()); // System.out.println(event2.toString()); } event2 = channel.take(); Assert.assertNull("extra event found", event2); transaction.commit(); transaction.close(); }
Example 13
Source File: ElasticSearchSink.java (mt-flume, Apache License 2.0)
@Override
public Status process() throws EventDeliveryException {
  logger.debug("processing...");
  Status status = Status.READY;
  Channel channel = getChannel();
  Transaction txn = channel.getTransaction();
  try {
    txn.begin();
    BulkRequestBuilder bulkRequest = client.prepareBulk();
    for (int i = 0; i < batchSize; i++) {
      Event event = channel.take();
      if (event == null) {
        break;
      }
      IndexRequestBuilder indexRequest = indexRequestFactory.createIndexRequest(
          client, indexName, indexType, event);
      if (ttlMs > 0) {
        indexRequest.setTTL(ttlMs);
      }
      bulkRequest.add(indexRequest);
    }
    int size = bulkRequest.numberOfActions();
    if (size <= 0) {
      sinkCounter.incrementBatchEmptyCount();
      counterGroup.incrementAndGet("channel.underflow");
      status = Status.BACKOFF;
    } else {
      if (size < batchSize) {
        sinkCounter.incrementBatchUnderflowCount();
        status = Status.BACKOFF;
      } else {
        sinkCounter.incrementBatchCompleteCount();
      }
      sinkCounter.addToEventDrainAttemptCount(size);
      BulkResponse bulkResponse = bulkRequest.execute().actionGet();
      if (bulkResponse.hasFailures()) {
        throw new EventDeliveryException(bulkResponse.buildFailureMessage());
      }
    }
    txn.commit();
    sinkCounter.addToEventDrainSuccessCount(size);
    counterGroup.incrementAndGet("transaction.success");
  } catch (Throwable ex) {
    try {
      txn.rollback();
      counterGroup.incrementAndGet("transaction.rollback");
    } catch (Exception ex2) {
      logger.error("Exception in rollback. Rollback might not have been successful.", ex2);
    }
    if (ex instanceof Error || ex instanceof RuntimeException) {
      logger.error("Failed to commit transaction. Transaction rolled back.", ex);
      Throwables.propagate(ex);
    } else {
      logger.error("Failed to commit transaction. Transaction rolled back.", ex);
      throw new EventDeliveryException(
          "Failed to commit transaction. Transaction rolled back.", ex);
    }
  } finally {
    txn.close();
  }
  return status;
}
Example 14
Source File: TestFlumeFailoverTarget.java (datacollector, Apache License 2.0)
@Test
public void testWriteAvroRecordsDropSchemaSingleEvent() throws InterruptedException, StageException, IOException {
  DataGeneratorFormatConfig dataGeneratorFormatConfig = new DataGeneratorFormatConfig();
  dataGeneratorFormatConfig.avroSchema = SdcAvroTestUtil.AVRO_SCHEMA1;
  dataGeneratorFormatConfig.avroSchemaSource = INLINE;
  dataGeneratorFormatConfig.includeSchema = false;
  dataGeneratorFormatConfig.avroCompression = AvroCompression.NULL;
  FlumeTarget flumeTarget = FlumeTestUtil.createFlumeTarget(
      FlumeTestUtil.createDefaultFlumeConfig(port, true),
      DataFormat.AVRO,
      dataGeneratorFormatConfig
  );
  TargetRunner targetRunner = new TargetRunner.Builder(FlumeDTarget.class, flumeTarget).build();
  targetRunner.runInit();
  List<Record> records = SdcAvroTestUtil.getRecords1();
  targetRunner.runWrite(records);
  targetRunner.runDestroy();
  List<GenericRecord> genericRecords = new ArrayList<>();
  DatumReader<GenericRecord> datumReader = new GenericDatumReader<>();
  // Reader schema argument is optional
  datumReader.setSchema(new Schema.Parser().parse(SdcAvroTestUtil.AVRO_SCHEMA1));
  int eventCounter = 0;
  Transaction transaction = ch.getTransaction();
  transaction.begin();
  Event event = ch.take();
  while (event != null) {
    eventCounter++;
    BinaryDecoder decoder = DecoderFactory.get().binaryDecoder(event.getBody(), null);
    GenericRecord read = datumReader.read(null, decoder);
    while (read != null) {
      genericRecords.add(read);
      try {
        read = datumReader.read(null, decoder);
      } catch (EOFException e) {
        break;
      }
    }
    event = ch.take();
  }
  transaction.commit();
  transaction.close();
  Assert.assertEquals(1, eventCounter);
  Assert.assertEquals(3, genericRecords.size());
  SdcAvroTestUtil.compare1(genericRecords);
}
Example 15
Source File: TestHDFSEventSink.java (mt-flume, Apache License 2.0)
@Test
public void testAppend() throws InterruptedException, LifecycleException,
    EventDeliveryException, IOException {
  LOG.debug("Starting...");
  final long rollCount = 3;
  final long batchSize = 2;
  final String fileName = "FlumeData";
  // clear the test directory
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(conf);
  Path dirPath = new Path(testPath);
  fs.delete(dirPath, true);
  fs.mkdirs(dirPath);
  Context context = new Context();
  context.put("hdfs.path", testPath + "/%Y-%m-%d/%H");
  context.put("hdfs.timeZone", "UTC");
  context.put("hdfs.filePrefix", fileName);
  context.put("hdfs.rollCount", String.valueOf(rollCount));
  context.put("hdfs.batchSize", String.valueOf(batchSize));
  Configurables.configure(sink, context);
  Channel channel = new MemoryChannel();
  Configurables.configure(channel, context);
  sink.setChannel(channel);
  sink.start();
  Calendar eventDate = Calendar.getInstance();
  List<String> bodies = Lists.newArrayList();
  // push the event batches into channel
  for (int i = 1; i < 4; i++) {
    Transaction txn = channel.getTransaction();
    txn.begin();
    for (int j = 1; j <= batchSize; j++) {
      Event event = new SimpleEvent();
      eventDate.clear();
      eventDate.set(2011, i, i, i, 0); // yy mm dd
      event.getHeaders().put("timestamp", String.valueOf(eventDate.getTimeInMillis()));
      event.getHeaders().put("hostname", "Host" + i);
      String body = "Test." + i + "." + j;
      event.setBody(body.getBytes());
      bodies.add(body);
      channel.put(event);
    }
    txn.commit();
    txn.close();
    // execute sink to process the events
    sink.process();
  }
  sink.stop();
  verifyOutputSequenceFiles(fs, conf, dirPath.toUri().getPath(), fileName, bodies);
}
Example 16
Source File: TestMemoryChannel.java (mt-flume, Apache License 2.0)
@Test
public void testChannelResize() {
  Context context = new Context();
  Map<String, String> parms = new HashMap<String, String>();
  parms.put("capacity", "5");
  parms.put("transactionCapacity", "5");
  context.putAll(parms);
  Configurables.configure(channel, context);
  Transaction transaction = channel.getTransaction();
  transaction.begin();
  for (int i = 0; i < 5; i++) {
    channel.put(EventBuilder.withBody(String.format("test event %d", i).getBytes()));
  }
  transaction.commit();
  transaction.close();
  /*
   * Verify overflow semantics
   */
  transaction = channel.getTransaction();
  boolean overflowed = false;
  try {
    transaction.begin();
    channel.put(EventBuilder.withBody("overflow event".getBytes()));
    transaction.commit();
  } catch (ChannelException e) {
    overflowed = true;
    transaction.rollback();
  } finally {
    transaction.close();
  }
  Assert.assertTrue(overflowed);
  /*
   * Reconfigure capacity down and add another event, shouldn't result in exception
   */
  parms.put("capacity", "6");
  context.putAll(parms);
  Configurables.configure(channel, context);
  transaction = channel.getTransaction();
  transaction.begin();
  channel.put(EventBuilder.withBody("extended capacity event".getBytes()));
  transaction.commit();
  transaction.close();
  /*
   * Attempt to reconfigure capacity to below current entry count and verify
   * it wasn't carried out
   */
  parms.put("capacity", "2");
  parms.put("transactionCapacity", "2");
  context.putAll(parms);
  Configurables.configure(channel, context);
  for (int i = 0; i < 6; i++) {
    transaction = channel.getTransaction();
    transaction.begin();
    Assert.assertNotNull(channel.take());
    transaction.commit();
    transaction.close();
  }
}
Example 17
Source File: TestFlumeThriftTarget.java (datacollector, Apache License 2.0)
@Test
public void testWriteStringRecordsFromJSON3() throws InterruptedException, StageException, IOException {
  DataGeneratorFormatConfig dataGeneratorFormatConfig = new DataGeneratorFormatConfig();
  dataGeneratorFormatConfig.textFieldPath = "/"; // MAP
  dataGeneratorFormatConfig.textEmptyLineIfNull = false;
  FlumeTarget flumeTarget = FlumeTestUtil.createFlumeTarget(
      FlumeTestUtil.createFlumeConfig(
          false,  // backOff
          1,      // batchSize
          ClientType.THRIFT,
          1000,   // connection timeout
          ImmutableMap.of("h1", "localhost:" + port),
          HostSelectionStrategy.RANDOM,
          -1,     // maxBackOff
          1,      // maxRetryAttempts
          1000,   // requestTimeout
          false,  // singleEventPerBatch
          0
      ),
      DataFormat.TEXT,
      dataGeneratorFormatConfig
  );
  TargetRunner targetRunner = new TargetRunner.Builder(FlumeDTarget.class, flumeTarget)
      .setOnRecordError(OnRecordError.TO_ERROR).build();
  targetRunner.runInit();
  List<Record> logRecords = FlumeTestUtil.createJsonRecords();
  targetRunner.runWrite(logRecords);
  // All records must be sent to error
  Assert.assertEquals(logRecords.size(), targetRunner.getErrorRecords().size());
  targetRunner.runDestroy();
  Transaction transaction = ch.getTransaction();
  transaction.begin();
  Event event = ch.take();
  Assert.assertNull(event);
  transaction.commit();
  transaction.close();
}
Example 18
Source File: TestFlumeThriftTarget.java (datacollector, Apache License 2.0)
@Test
public void testWriteStringRecordsFromJSON2() throws InterruptedException, StageException, IOException {
  DataGeneratorFormatConfig dataGeneratorFormatConfig = new DataGeneratorFormatConfig();
  dataGeneratorFormatConfig.textFieldPath = "/lastStatusChange";
  dataGeneratorFormatConfig.textEmptyLineIfNull = false;
  FlumeTarget flumeTarget = FlumeTestUtil.createFlumeTarget(
      FlumeTestUtil.createFlumeConfig(
          false,  // backOff
          1,      // batchSize
          ClientType.THRIFT,
          1000,   // connection timeout
          ImmutableMap.of("h1", "localhost:" + port),
          HostSelectionStrategy.RANDOM,
          -1,     // maxBackOff
          1,      // maxRetryAttempts
          1000,   // requestTimeout
          false,  // singleEventPerBatch
          0
      ),
      DataFormat.TEXT,
      dataGeneratorFormatConfig
  );
  TargetRunner targetRunner = new TargetRunner.Builder(FlumeDTarget.class, flumeTarget).build();
  targetRunner.runInit();
  List<Record> logRecords = FlumeTestUtil.createJsonRecords();
  targetRunner.runWrite(logRecords);
  targetRunner.runDestroy();
  for (Record r : logRecords) {
    Transaction transaction = ch.getTransaction();
    transaction.begin();
    Event event = ch.take();
    Assert.assertNotNull(event);
    Assert.assertEquals(r.get().getValueAsMap().get("lastStatusChange").getValueAsString(),
        new String(event.getBody()).trim());
    Assert.assertTrue(event.getHeaders().containsKey("charset"));
    Assert.assertEquals("UTF-8", event.getHeaders().get("charset"));
    transaction.commit();
    transaction.close();
  }
}
Example 19
Source File: HBaseSink.java (mt-flume, Apache License 2.0)
private void putEventsAndCommit(final List<Row> actions, final List<Increment> incs,
    Transaction txn) throws EventDeliveryException {
  try {
    runPrivileged(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        for (Row r : actions) {
          if (r instanceof Put) {
            ((Put) r).setWriteToWAL(enableWal);
          }
          // Newer versions of HBase - Increment implements Row.
          if (r instanceof Increment) {
            ((Increment) r).setWriteToWAL(enableWal);
          }
        }
        table.batch(actions);
        return null;
      }
    });
    runPrivileged(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        for (final Increment i : incs) {
          i.setWriteToWAL(enableWal);
          table.increment(i);
        }
        return null;
      }
    });
    txn.commit();
    sinkCounter.addToEventDrainSuccessCount(actions.size());
  } catch (Throwable e) {
    try {
      txn.rollback();
    } catch (Exception e2) {
      logger.error("Exception in rollback. Rollback might not have been successful.", e2);
    }
    logger.error("Failed to commit transaction. Transaction rolled back.", e);
    if (e instanceof Error || e instanceof RuntimeException) {
      Throwables.propagate(e);
    } else {
      throw new EventDeliveryException(
          "Failed to commit transaction. Transaction rolled back.", e);
    }
  } finally {
    txn.close();
  }
}
Example 20
Source File: TestHBaseSink.java (mt-flume, Apache License 2.0)
/**
 * This test must run last - it shuts down the minicluster :D
 * @throws Exception
 */
@Ignore("For dev builds only: " +
    "This test takes too long, and this has to be run after all other " +
    "tests, since it shuts down the minicluster. " +
    "Comment out all other tests " +
    "and uncomment this annotation to run this test.")
@Test(expected = EventDeliveryException.class)
public void testHBaseFailure() throws Exception {
  ctx.put("batchSize", "2");
  testUtility.createTable(tableName.getBytes(), columnFamily.getBytes());
  HBaseSink sink = new HBaseSink(testUtility.getConfiguration());
  Configurables.configure(sink, ctx);
  // Reset the context to a higher batchSize
  ctx.put("batchSize", "100");
  Channel channel = new MemoryChannel();
  Configurables.configure(channel, new Context());
  sink.setChannel(channel);
  sink.start();
  Transaction tx = channel.getTransaction();
  tx.begin();
  for (int i = 0; i < 3; i++) {
    Event e = EventBuilder.withBody(Bytes.toBytes(valBase + "-" + i));
    channel.put(e);
  }
  tx.commit();
  tx.close();
  sink.process();
  HTable table = new HTable(testUtility.getConfiguration(), tableName);
  byte[][] results = getResults(table, 2);
  byte[] out;
  int found = 0;
  for (int i = 0; i < 2; i++) {
    for (int j = 0; j < 2; j++) {
      if (Arrays.equals(results[j], Bytes.toBytes(valBase + "-" + i))) {
        found++;
        break;
      }
    }
  }
  Assert.assertEquals(2, found);
  out = results[2];
  Assert.assertArrayEquals(Longs.toByteArray(2), out);
  testUtility.shutdownMiniCluster();
  sink.process();
  sink.stop();
}