Java Code Examples for org.apache.flume.Channel#take()
The following examples show how to use org.apache.flume.Channel#take().
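Every example below follows the same contract: take() must be called inside an open channel Transaction, which is committed on success, rolled back on failure, and closed in a finally block; take() returns null when no event is available. Here is a minimal sketch of that pattern, assuming a locally configured MemoryChannel and a hypothetical handle() method standing in for sink-specific processing:

import org.apache.flume.Channel;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.Transaction;
import org.apache.flume.channel.MemoryChannel;
import org.apache.flume.conf.Configurables;

public class ChannelTakeSketch {

  public static void main(String[] args) {
    // Assumption: a locally configured MemoryChannel; real sources and sinks
    // receive an already-configured channel from the Flume runtime.
    Channel channel = new MemoryChannel();
    Configurables.configure(channel, new Context());

    Transaction txn = channel.getTransaction();
    txn.begin();
    try {
      Event event = channel.take(); // null when the channel is empty
      if (event != null) {
        handle(event);
      }
      txn.commit();
    } catch (Throwable t) {
      txn.rollback(); // leaves the event in the channel for a retry
      throw new RuntimeException("Failed to take event", t);
    } finally {
      txn.close();
    }
  }

  // Hypothetical handler standing in for sink-specific processing.
  private static void handle(Event event) {
    System.out.println(new String(event.getBody()));
  }
}

The examples that follow vary this skeleton mainly in how many events they take per transaction and in how failures map to Status.READY or Status.BACKOFF.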
Example 1
Source File: TestSequenceGeneratorSource.java From mt-flume with Apache License 2.0

@Test
public void testBatchProcessWithLifeCycle() throws InterruptedException,
    LifecycleException, EventDeliveryException {

  int batchSize = 10;
  Channel channel = new PseudoTxnMemoryChannel();
  Context context = new Context();

  context.put("logicalNode.name", "test");
  context.put("batchSize", Integer.toString(batchSize));

  Configurables.configure(source, context);
  Configurables.configure(channel, context);

  List<Channel> channels = new ArrayList<Channel>();
  channels.add(channel);

  ChannelSelector rcs = new ReplicatingChannelSelector();
  rcs.setChannels(channels);

  source.setChannelProcessor(new ChannelProcessor(rcs));

  source.start();

  for (long i = 0; i < 100; i++) {
    source.process();
    for (long j = batchSize; j > 0; j--) {
      Event event = channel.take();
      String expectedVal = String.valueOf(((i + 1) * batchSize) - j);
      String resultedVal = new String(event.getBody());
      Assert.assertTrue("Expected " + expectedVal + " is not equals to " + resultedVal,
          expectedVal.equals(resultedVal));
    }
  }

  source.stop();
}
Example 2
Source File: DruidSink.java From ingestion with Apache License 2.0

private Event buildEvent(Channel channel) {
  final Event takenEvent = channel.take();
  final ObjectNode objectNode = new ObjectNode(JsonNodeFactory.instance);
  Event event = null;
  if (takenEvent != null) {
    event = EventBuilder.withBody(objectNode.toString().getBytes(Charsets.UTF_8),
        takenEvent.getHeaders());
  }
  return event;
}
Example 3
Source File: KafkaSink.java From ingestion with Apache License 2.0

@Override
public Status process() throws EventDeliveryException {
  Channel channel = getChannel();
  Transaction tx = channel.getTransaction();
  try {
    tx.begin();
    Event event = channel.take();
    if (event == null) {
      tx.commit();
      return Status.READY;
    }

    String data = null;
    if (writeBody) {
      data = new String(event.getBody());
    } else {
      data = mapper.writeValueAsString(event.getHeaders());
    }

    producer.send(new KeyedMessage<String, String>(topic, data));
    tx.commit();
    return Status.READY;
  } catch (Exception e) {
    try {
      tx.rollback();
      return Status.BACKOFF;
    } catch (Exception e2) {
      log.error("Rollback Exception:{}", e2);
    }
    log.error("KafkaSink Exception:{}", e);
    return Status.BACKOFF;
  } finally {
    tx.close();
  }
}
Example 4
Source File: TestUtils.java From mt-flume with Apache License 2.0

public static Set<String> takeWithoutCommit(Channel channel, Transaction tx,
    int number) {
  Set<String> events = Sets.newHashSet();
  tx.begin();
  for (int i = 0; i < number; i++) {
    Event e = channel.take();
    if (e == null) {
      break;
    }
    events.add(new String(e.getBody()));
  }
  return events;
}
Example 5
Source File: HBaseSink.java From mt-flume with Apache License 2.0

@Override
public Status process() throws EventDeliveryException {
  Status status = Status.READY;
  Channel channel = getChannel();
  Transaction txn = channel.getTransaction();
  List<Row> actions = new LinkedList<Row>();
  List<Increment> incs = new LinkedList<Increment>();
  txn.begin();

  long i = 0;
  for (; i < batchSize; i++) {
    Event event = channel.take();
    if (event == null) {
      status = Status.BACKOFF;
      if (i == 0) {
        sinkCounter.incrementBatchEmptyCount();
      } else {
        sinkCounter.incrementBatchUnderflowCount();
      }
      break;
    } else {
      serializer.initialize(event, columnFamily);
      actions.addAll(serializer.getActions());
      incs.addAll(serializer.getIncrements());
    }
  }
  if (i == batchSize) {
    sinkCounter.incrementBatchCompleteCount();
  }
  sinkCounter.addToEventDrainAttemptCount(i);

  putEventsAndCommit(actions, incs, txn);
  return status;
}
Example 6
Source File: KafkaSink.java From flume-ng-kafka-sink with Apache License 2.0

public Status process() throws EventDeliveryException {
  Channel channel = getChannel();
  Transaction tx = channel.getTransaction();
  try {
    tx.begin();
    Event event = channel.take();
    if (event == null) {
      tx.commit();
      return Status.READY;
    }

    producer.send(new ProducerData<String, String>(topic,
        new String(event.getBody())));
    log.trace("Message: {}", event.getBody());
    tx.commit();
    return Status.READY;
  } catch (Exception e) {
    try {
      tx.rollback();
      return Status.BACKOFF;
    } catch (Exception e2) {
      log.error("Rollback Exception:{}", e2);
    }
    log.error("KafkaSink Exception:{}", e);
    return Status.BACKOFF;
  } finally {
    tx.close();
  }
}
Example 7
Source File: TestSequenceGeneratorSource.java From mt-flume with Apache License 2.0

@Test
public void testLifecycle() throws InterruptedException,
    EventDeliveryException {

  Channel channel = new PseudoTxnMemoryChannel();
  Context context = new Context();

  context.put("logicalNode.name", "test");

  Configurables.configure(source, context);
  Configurables.configure(channel, context);

  List<Channel> channels = new ArrayList<Channel>();
  channels.add(channel);

  ChannelSelector rcs = new ReplicatingChannelSelector();
  rcs.setChannels(channels);

  source.setChannelProcessor(new ChannelProcessor(rcs));

  source.start();

  for (long i = 0; i < 100; i++) {
    source.process();
    Event event = channel.take();

    Assert.assertArrayEquals(String.valueOf(i).getBytes(),
        new String(event.getBody()).getBytes());
  }

  source.stop();
}
Example 8
Source File: TestSequenceGeneratorSource.java From mt-flume with Apache License 2.0

@Test
public void testProcess() throws InterruptedException, LifecycleException,
    EventDeliveryException {

  Channel channel = new PseudoTxnMemoryChannel();
  Context context = new Context();

  context.put("logicalNode.name", "test");

  Configurables.configure(source, context);
  Configurables.configure(channel, context);

  List<Channel> channels = new ArrayList<Channel>();
  channels.add(channel);

  ChannelSelector rcs = new ReplicatingChannelSelector();
  rcs.setChannels(channels);

  source.setChannelProcessor(new ChannelProcessor(rcs));

  for (long i = 0; i < 100; i++) {
    source.process();
    Event event = channel.take();

    Assert.assertArrayEquals(String.valueOf(i).getBytes(),
        new String(event.getBody()).getBytes());
  }
}
Example 9
Source File: TestMultiportSyslogTCPSource.java From mt-flume with Apache License 2.0

private static Event takeEvent(Channel channel) {
  Transaction txn = channel.getTransaction();
  txn.begin();
  Event evt = channel.take();
  txn.commit();
  txn.close();
  return evt;
}
Example 10
Source File: LoggerSink.java From mt-flume with Apache License 2.0

@Override
public Status process() throws EventDeliveryException {
  Status result = Status.READY;
  Channel channel = getChannel();
  Transaction transaction = channel.getTransaction();
  Event event = null;

  try {
    transaction.begin();
    event = channel.take();

    if (event != null) {
      if (logger.isInfoEnabled()) {
        logger.info("Event: " + EventHelper.dumpEvent(event));
      }
    } else {
      // No event found, request back-off semantics from the sink runner
      result = Status.BACKOFF;
    }
    transaction.commit();
  } catch (Exception ex) {
    transaction.rollback();
    throw new EventDeliveryException("Failed to log event: " + event, ex);
  } finally {
    transaction.close();
  }

  return result;
}
Example 11
Source File: FlumeThriftService.java From bahir-flink with Apache License 2.0

public static void main(String[] args) throws Exception {
  // Flume Source
  ThriftSource source = new ThriftSource();
  Channel ch = new MemoryChannel();
  Configurables.configure(ch, new Context());

  Context context = new Context();
  context.put("port", String.valueOf(port));
  context.put("bind", hostname);
  Configurables.configure(source, context);

  List<Channel> channels = new ArrayList<>();
  channels.add(ch);
  ChannelSelector rcs = new ReplicatingChannelSelector();
  rcs.setChannels(channels);
  source.setChannelProcessor(new ChannelProcessor(rcs));

  source.start();
  System.out.println("ThriftSource service start.");

  while (true) {
    Transaction transaction = ch.getTransaction();
    transaction.begin();
    Event event = ch.take();
    if (null != event) {
      System.out.println(event);
      System.out.println(new String(event.getBody()).trim());
    }
    transaction.commit();
    transaction.close();
  }
}
Example 12
Source File: TestMultiLineExecSource.java From flume-plugins with MIT License

@Test
public void testProcess() throws InterruptedException, LifecycleException,
    EventDeliveryException, IOException {

  Channel channel = new MemoryChannel();
  Context context = new Context();

  String resource = "src/test/resources/server.log";
  assertNotNull(resource);

  int expectedNrOfEvents = 0;
  BufferedReader br = new BufferedReader(new FileReader(resource));
  String line;
  while ((line = br.readLine()) != null) {
    if (line.endsWith("|#]")) {
      expectedNrOfEvents++;
    }
  }

  context.put("command", format("cat %s", resource));
  context.put("event.terminator", "|#]");
  context.put("keep-alive", "1");
  context.put("capacity", "100000");
  context.put("transactionCapacity", "100000");

  Configurables.configure(source, context);
  Configurables.configure(channel, context);

  ChannelSelector rcs = new ReplicatingChannelSelector();
  rcs.setChannels(Lists.newArrayList(channel));

  source.setChannelProcessor(new ChannelProcessor(rcs));
  source.start();

  Transaction transaction = channel.getTransaction();
  transaction.begin();

  Event event;
  int actualNrOfEvents = 0;

  FileOutputStream outputStream =
      new FileOutputStream("/tmp/flume-execsource." + Thread.currentThread().getId());

  while ((event = channel.take()) != null) {
    outputStream.write(event.getBody());
    outputStream.write('\n');
    outputStream.write('\n');
    actualNrOfEvents++;
  }

  outputStream.close();
  transaction.commit();
  transaction.close();

  source.stop();

  File actualFile = new File("/tmp/flume-execsource." + Thread.currentThread().getId());
  File expectedFile = new File(resource);

  assertEquals(expectedNrOfEvents, actualNrOfEvents);
  // This doesn't work anymore since we implemented the different event separator § instead of \n
  // assertEquals(FileUtils.checksumCRC32(expectedFile), FileUtils.checksumCRC32(actualFile));

  FileUtils.forceDelete(actualFile);
}
Example 13
Source File: ElasticSearchSink.java From flume-elasticsearch-sink with Apache License 2.0

@Override
public Status process() {
  if (shouldBackOff.get()) {
    throw new NoNodeAvailableException("Check whether Elasticsearch is down or not.");
  }
  Channel channel = getChannel();
  Transaction txn = channel.getTransaction();
  txn.begin();
  try {
    Event event = channel.take();
    if (event != null) {
      String body = new String(event.getBody(), Charsets.UTF_8);
      if (!Strings.isNullOrEmpty(body)) {
        logger.debug("start to sink event [{}].", body);
        String index = indexBuilder.getIndex(event);
        String type = indexBuilder.getType(event);
        String id = indexBuilder.getId(event);
        XContentBuilder xContentBuilder = serializer.serialize(event);
        if (xContentBuilder != null) {
          if (!(Strings.isNullOrEmpty(id))) {
            bulkProcessor.add(new IndexRequest(index, type, id)
                .source(xContentBuilder));
          } else {
            bulkProcessor.add(new IndexRequest(index, type)
                .source(xContentBuilder));
          }
        } else {
          logger.error("Could not serialize the event body [{}] for index [{}], type[{}] and id [{}] ",
              new Object[]{body, index, type, id});
        }
      }
      logger.debug("sink event [{}] successfully.", body);
    }
    txn.commit();
    return Status.READY;
  } catch (Throwable tx) {
    try {
      txn.rollback();
    } catch (Exception ex) {
      logger.error("exception in rollback.", ex);
    }
    logger.error("transaction rolled back.", tx);
    return Status.BACKOFF;
  } finally {
    txn.close();
  }
}
Example 14
Source File: AbstractRpcSink.java From mt-flume with Apache License 2.0

@Override
public Status process() throws EventDeliveryException {
  Status status = Status.READY;
  Channel channel = getChannel();
  Transaction transaction = channel.getTransaction();

  resetLock.lock();
  try {
    transaction.begin();

    verifyConnection();

    List<Event> batch = Lists.newLinkedList();

    for (int i = 0; i < client.getBatchSize(); i++) {
      Event event = channel.take();
      if (event == null) {
        break;
      }
      batch.add(event);
    }

    int size = batch.size();
    int batchSize = client.getBatchSize();
    logger.info("RpcSink " + getName() + " consume " + size + ", want " + batchSize);

    if (size == 0) {
      sinkCounter.incrementBatchEmptyCount();
      status = Status.BACKOFF;
    } else {
      if (size < batchSize) {
        sinkCounter.incrementBatchUnderflowCount();
      } else {
        sinkCounter.incrementBatchCompleteCount();
      }
      sinkCounter.addToEventDrainAttemptCount(size);
      client.appendBatch(batch);
    }

    transaction.commit();
    sinkCounter.addToEventDrainSuccessCount(size);
  } catch (Throwable t) {
    transaction.rollback();
    if (t instanceof Error) {
      throw (Error) t;
    } else if (t instanceof ChannelException) {
      logger.error("Rpc Sink " + getName() + ": Unable to get event from" +
          " channel " + channel.getName() + ". Exception follows.", t);
      status = Status.BACKOFF;
    } else {
      destroyConnection();
      throw new EventDeliveryException("Failed to send events", t);
    }
  } finally {
    resetLock.unlock();
    transaction.close();
  }

  return status;
}
Example 15
Source File: MorphlineSink.java From mt-flume with Apache License 2.0

@Override
public Status process() throws EventDeliveryException {
  int batchSize = getMaxBatchSize();
  long batchEndTime = System.currentTimeMillis() + getMaxBatchDurationMillis();
  Channel myChannel = getChannel();
  Transaction txn = myChannel.getTransaction();
  txn.begin();
  boolean isMorphlineTransactionCommitted = true;
  try {
    int numEventsTaken = 0;
    handler.beginTransaction();
    isMorphlineTransactionCommitted = false;

    // repeatedly take and process events from the Flume queue
    for (int i = 0; i < batchSize; i++) {
      Event event = myChannel.take();
      if (event == null) {
        break;
      }
      numEventsTaken++;
      LOGGER.debug("Flume event: {}", event);
      //StreamEvent streamEvent = createStreamEvent(event);
      handler.process(event);
      if (System.currentTimeMillis() >= batchEndTime) {
        break;
      }
    }

    // update metrics
    if (numEventsTaken == 0) {
      sinkCounter.incrementBatchEmptyCount();
    }
    if (numEventsTaken < batchSize) {
      sinkCounter.incrementBatchUnderflowCount();
    } else {
      sinkCounter.incrementBatchCompleteCount();
    }
    sinkCounter.addToEventDrainAttemptCount(numEventsTaken);
    sinkCounter.addToEventDrainSuccessCount(numEventsTaken);

    handler.commitTransaction();
    isMorphlineTransactionCommitted = true;
    txn.commit();
    return numEventsTaken == 0 ? Status.BACKOFF : Status.READY;
  } catch (Throwable t) {
    // Ooops - need to rollback and back off
    LOGGER.error("Morphline Sink " + getName() + ": Unable to process event from channel " +
        myChannel.getName() + ". Exception follows.", t);
    try {
      if (!isMorphlineTransactionCommitted) {
        handler.rollbackTransaction();
      }
    } catch (Throwable t2) {
      LOGGER.error("Morphline Sink " + getName() + ": Unable to rollback morphline transaction. " +
          "Exception follows.", t2);
    } finally {
      try {
        txn.rollback();
      } catch (Throwable t4) {
        LOGGER.error("Morphline Sink " + getName() + ": Unable to rollback Flume transaction. " +
            "Exception follows.", t4);
      }
    }

    if (t instanceof Error) {
      throw (Error) t; // rethrow original exception
    } else if (t instanceof ChannelException) {
      return Status.BACKOFF;
    } else {
      throw new EventDeliveryException("Failed to send events", t); // rethrow and backoff
    }
  } finally {
    txn.close();
  }
}
Example 16
Source File: ElasticSearchSink.java From mt-flume with Apache License 2.0

@Override
public Status process() throws EventDeliveryException {
  logger.debug("processing...");
  Status status = Status.READY;
  Channel channel = getChannel();
  Transaction txn = channel.getTransaction();
  try {
    txn.begin();
    BulkRequestBuilder bulkRequest = client.prepareBulk();
    for (int i = 0; i < batchSize; i++) {
      Event event = channel.take();
      if (event == null) {
        break;
      }

      IndexRequestBuilder indexRequest = indexRequestFactory.createIndexRequest(
          client, indexName, indexType, event);

      if (ttlMs > 0) {
        indexRequest.setTTL(ttlMs);
      }

      bulkRequest.add(indexRequest);
    }

    int size = bulkRequest.numberOfActions();
    if (size <= 0) {
      sinkCounter.incrementBatchEmptyCount();
      counterGroup.incrementAndGet("channel.underflow");
      status = Status.BACKOFF;
    } else {
      if (size < batchSize) {
        sinkCounter.incrementBatchUnderflowCount();
        status = Status.BACKOFF;
      } else {
        sinkCounter.incrementBatchCompleteCount();
      }

      sinkCounter.addToEventDrainAttemptCount(size);

      BulkResponse bulkResponse = bulkRequest.execute().actionGet();
      if (bulkResponse.hasFailures()) {
        throw new EventDeliveryException(bulkResponse.buildFailureMessage());
      }
    }
    txn.commit();
    sinkCounter.addToEventDrainSuccessCount(size);
    counterGroup.incrementAndGet("transaction.success");
  } catch (Throwable ex) {
    try {
      txn.rollback();
      counterGroup.incrementAndGet("transaction.rollback");
    } catch (Exception ex2) {
      logger.error("Exception in rollback. Rollback might not have been successful.", ex2);
    }

    if (ex instanceof Error || ex instanceof RuntimeException) {
      logger.error("Failed to commit transaction. Transaction rolled back.", ex);
      Throwables.propagate(ex);
    } else {
      logger.error("Failed to commit transaction. Transaction rolled back.", ex);
      throw new EventDeliveryException(
          "Failed to commit transaction. Transaction rolled back.", ex);
    }
  } finally {
    txn.close();
  }
  return status;
}
Example 17
Source File: DatasetSink.java From kite with Apache License 2.0

@Override
public Status process() throws EventDeliveryException {
  if (writer == null) {
    try {
      this.writer = newWriter(login, target);
    } catch (DatasetException e) {
      // DatasetException includes DatasetNotFoundException
      throw new EventDeliveryException(
          "Cannot write to " + getName(), e);
    }
  }

  // handle file rolling
  if ((System.currentTimeMillis() - lastRolledMs) / 1000 > rollIntervalS) {
    // close the current writer and get a new one
    writer.close();
    this.writer = newWriter(login, target);
    this.lastRolledMs = System.currentTimeMillis();
    LOG.info("Rolled writer for " + getName());
  }

  Channel channel = getChannel();
  Transaction transaction = null;
  try {
    long processedEvents = 0;

    transaction = channel.getTransaction();
    transaction.begin();
    for (; processedEvents < batchSize; processedEvents += 1) {
      Event event = channel.take();
      if (event == null) {
        // no events available in the channel
        break;
      }

      this.datum = deserialize(event, reuseDatum ? datum : null);

      // writeEncoded would be an optimization in some cases, but HBase
      // will not support it and partitioned Datasets need to get partition
      // info from the entity Object. We may be able to avoid the
      // serialization round-trip otherwise.
      writer.write(datum);
    }

    // TODO: Add option to sync, depends on CDK-203
    if (writer instanceof Flushable) {
      ((Flushable) writer).flush();
    }

    // commit after data has been written and flushed
    transaction.commit();

    if (processedEvents == 0) {
      counter.incrementBatchEmptyCount();
      return Status.BACKOFF;
    } else if (processedEvents < batchSize) {
      counter.incrementBatchUnderflowCount();
    } else {
      counter.incrementBatchCompleteCount();
    }

    counter.addToEventDrainSuccessCount(processedEvents);

    return Status.READY;
  } catch (Throwable th) {
    // catch-all for any unhandled Throwable so that the transaction is
    // correctly rolled back.
    if (transaction != null) {
      try {
        transaction.rollback();
      } catch (Exception ex) {
        LOG.error("Transaction rollback failed", ex);
        throw Throwables.propagate(ex);
      }
    }

    // close the writer and remove its reference
    writer.close();
    this.writer = null;
    this.lastRolledMs = System.currentTimeMillis();

    // handle the exception
    Throwables.propagateIfInstanceOf(th, Error.class);
    Throwables.propagateIfInstanceOf(th, EventDeliveryException.class);
    throw new EventDeliveryException(th);
  } finally {
    if (transaction != null) {
      transaction.close();
    }
  }
}
Example 18
Source File: TestFlumeLoadBalancingTarget.java From datacollector with Apache License 2.0

@Test
public void testWriteStringRecordsRoundRobin() throws StageException {
  DataGeneratorFormatConfig dataGeneratorFormatConfig = new DataGeneratorFormatConfig();
  dataGeneratorFormatConfig.textFieldPath = "/";
  dataGeneratorFormatConfig.textEmptyLineIfNull = false;

  FlumeTarget flumeTarget = FlumeTestUtil.createFlumeTarget(
      FlumeTestUtil.createFlumeConfig(
          false,                  // backOff
          100,                    // batchSize
          ClientType.AVRO_LOAD_BALANCING,
          2000,                   // connection timeout
          flumeHosts,
          HostSelectionStrategy.ROUND_ROBIN,
          -1,                     // maxBackOff
          1,                      // maxRetryAttempts
          2000,                   // requestTimeout
          false,                  // singleEventPerBatch
          0
      ),
      DataFormat.TEXT,
      dataGeneratorFormatConfig
  );
  TargetRunner targetRunner = new TargetRunner.Builder(FlumeDTarget.class, flumeTarget).build();

  targetRunner.runInit();
  List<List<Record>> logRecords = new ArrayList<>(NUM_HOSTS);
  for (int i = 0; i < NUM_HOSTS; i++) {
    logRecords.add(FlumeTestUtil.createStringRecords());
  }

  for (int i = 0; i < NUM_HOSTS; i++) {
    targetRunner.runWrite(logRecords.get(i));
  }
  targetRunner.runDestroy();

  for (int i = 0; i < logRecords.size(); i++) {
    Channel channel = chs.get(i % NUM_HOSTS);
    List<Record> records = logRecords.get(i);
    for (int j = 0; j < records.size(); j++) {
      Transaction transaction = channel.getTransaction();
      transaction.begin();
      Event event = channel.take();
      Assert.assertNotNull(event);
      Assert.assertEquals(records.get(j).get().getValueAsString(),
          new String(event.getBody()).trim());
      Assert.assertTrue(event.getHeaders().containsKey("charset"));
      Assert.assertEquals("UTF-8", event.getHeaders().get("charset"));
      transaction.commit();
      transaction.close();
    }
  }
}
Example 19
Source File: ElasticSearchSink.java From ElasticsearchSink2 with Apache License 2.0

@Override
public Status process() throws EventDeliveryException {
  logger.debug("processing...");
  Status status = Status.READY;
  Channel channel = getChannel();
  Transaction txn = channel.getTransaction();
  try {
    txn.begin();
    int count;
    for (count = 0; count < batchSize; ++count) {
      Event event = channel.take();

      if (event == null) {
        break;
      }

      String realIndexType = BucketPath.escapeString(indexType, event.getHeaders());
      client.addEvent(event, indexNameBuilder, realIndexType, ttlMs);
    }

    if (count <= 0) {
      sinkCounter.incrementBatchEmptyCount();
      counterGroup.incrementAndGet("channel.underflow");
      status = Status.BACKOFF;
    } else {
      if (count < batchSize) {
        sinkCounter.incrementBatchUnderflowCount();
        status = Status.BACKOFF;
      } else {
        sinkCounter.incrementBatchCompleteCount();
      }

      sinkCounter.addToEventDrainAttemptCount(count);
      client.execute();
    }
    txn.commit();
    sinkCounter.addToEventDrainSuccessCount(count);
    counterGroup.incrementAndGet("transaction.success");
  } catch (Throwable ex) {
    try {
      txn.rollback();
      counterGroup.incrementAndGet("transaction.rollback");
    } catch (Exception ex2) {
      logger.error("Exception in rollback. Rollback might not have been successful.", ex2);
    }

    if (ex instanceof Error || ex instanceof RuntimeException) {
      logger.error(FAILED_TO_COMMIT_TRANSACTION_TRANSACTION_ROLLED_BACK, ex);
      Throwables.propagate(ex);
    } else {
      logger.error(FAILED_TO_COMMIT_TRANSACTION_TRANSACTION_ROLLED_BACK, ex);
      throw new EventDeliveryException(FAILED_TO_COMMIT_TRANSACTION_TRANSACTION_ROLLED_BACK, ex);
    }
  } finally {
    txn.close();
  }
  return status;
}
Example 20
Source File: JDBCSink.java From ingestion with Apache License 2.0

@Override
public Status process() throws EventDeliveryException {
  log.debug("Executing JDBCSink.process()...");
  Status status = Status.READY;
  Channel channel = getChannel();
  Transaction txn = channel.getTransaction();

  try {
    txn.begin();
    int count;
    List<Event> eventList = new ArrayList<Event>();
    for (count = 0; count < batchSize; ++count) {
      Event event = channel.take();
      if (event == null) {
        break;
      }
      eventList.add(event);
    }
    if (count <= 0) {
      sinkCounter.incrementBatchEmptyCount();
      counterGroup.incrementAndGet("channel.underflow");
      status = Status.BACKOFF;
    } else {
      if (count < batchSize) {
        sinkCounter.incrementBatchUnderflowCount();
        status = Status.BACKOFF;
      } else {
        sinkCounter.incrementBatchCompleteCount();
      }
      final boolean success = this.queryGenerator.executeQuery(create, eventList);
      if (!success) {
        throw new JDBCSinkException("Query failed");
      }
      connection.commit();
      sinkCounter.addToEventDrainAttemptCount(count);
    }
    txn.commit();
    sinkCounter.addToEventDrainSuccessCount(count);
    counterGroup.incrementAndGet("transaction.success");
  } catch (Throwable t) {
    log.error("Exception during process", t);
    try {
      connection.rollback();
    } catch (SQLException ex) {
      log.error("Exception on rollback", ex);
    } finally {
      txn.rollback();
      status = Status.BACKOFF;
      this.sinkCounter.incrementConnectionFailedCount();
      if (t instanceof Error) {
        throw new JDBCSinkException(t);
      }
    }
  } finally {
    txn.close();
  }
  return status;
}