Java Code Examples for org.apache.flume.channel.MemoryChannel#start()

The following examples show how to use org.apache.flume.channel.MemoryChannel#start(). Each example is taken from an open-source project; the originating source file and license are noted above each snippet.
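
Before the project-specific examples, a minimal, self-contained sketch of the pattern they all share may help: configure a MemoryChannel, call start() before any source or sink uses it, move events through it inside transactions, and call stop() during teardown. This sketch is not taken from any of the projects below; the class name, channel name, and capacity values are illustrative.

import java.nio.charset.StandardCharsets;

import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.Transaction;
import org.apache.flume.channel.MemoryChannel;
import org.apache.flume.conf.Configurables;
import org.apache.flume.event.EventBuilder;

public class MemoryChannelStartSketch {

    public static void main(String[] args) {
        // Configure an in-memory channel with illustrative capacities.
        MemoryChannel channel = new MemoryChannel();
        Context context = new Context();
        context.put("capacity", "100");
        context.put("transactionCapacity", "10");
        Configurables.configure(channel, context);
        channel.setName("example-memory-channel");

        // start() must be called before sources put to or sinks take from the channel.
        channel.start();

        // A source-side transaction: put one event and commit it.
        Transaction putTx = channel.getTransaction();
        putTx.begin();
        channel.put(EventBuilder.withBody("hello", StandardCharsets.UTF_8));
        putTx.commit();
        putTx.close();

        // A sink-side transaction: the committed event is now visible to take().
        Transaction takeTx = channel.getTransaction();
        takeTx.begin();
        Event event = channel.take();
        System.out.println(new String(event.getBody(), StandardCharsets.UTF_8));
        takeTx.commit();
        takeTx.close();

        channel.stop();
    }
}

In the examples below, the take side is handled by a configured sink or test harness, so typically only the channel setup and the start()/stop() calls appear explicitly.
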
Example 1
Source File: FlumeAgentServiceImpl.java (from searchanalytics-bigdata, MIT License)
private void createSparkAvroSink() {
	sparkAvroChannel = new MemoryChannel();
	Map<String, String> channelParamters = new HashMap<>();
	channelParamters.put("capacity", "100000");
	channelParamters.put("transactionCapacity", "1000");
	Context channelContext = new Context(channelParamters);
	Configurables.configure(sparkAvroChannel, channelContext);
	String channelName = "SparkAvroMemoryChannel-" + UUID.randomUUID();
	sparkAvroChannel.setName(channelName);

	sparkAvroSink = new AvroSink();
	sparkAvroSink.setName("SparkAvroSink-" + UUID.randomUUID());
	Map<String, String> paramters = new HashMap<>();
	paramters.put("type", "avro");
	paramters.put("hostname", "localhost");
	paramters.put("port", "41111");
	paramters.put("batch-size", "100");
	Context sinkContext = new Context(paramters);
	sparkAvroSink.configure(sinkContext);
	Configurables.configure(sparkAvroSink, sinkContext);
	sparkAvroSink.setChannel(sparkAvroChannel);

	sparkAvroChannel.start();
	sparkAvroSink.start();
}
 
Example 2
Source File: DruidSinkIT.java (from ingestion, Apache License 2.0)
@Before
public void setup() {
    //        Context channelContext = new Context();
    //        channelContext.put("checkpointDir","data/check");
    //        channelContext.put("dataDirs","data/data");
    //        channelContext.put("capacity","1000");
    //        channelContext.put("transactionCapacity","100");
    //        channelContext.put("checkpointInterval","300");
    //        channel = new FileChannel();
    Context channelContext = new Context();
    channelContext.put("capacity", "10000");
    channelContext.put("transactionCapacity", "5000");
    channel = new MemoryChannel();
    channel.setName("junitChannel");
    Configurables.configure(channel, channelContext);
    channel.start();

    druidSink = new DruidSink();
    druidSink.setChannel(channel);
    druidSink.configure(getMockContext());
    druidSink.start();
}
 
Example 3
Source File: FlumeHbaseSinkServiceImpl.java (from searchanalytics-bigdata, MIT License)
private void createSink() {
		
		channel = new MemoryChannel();
		Map<String, String> channelParamters = new HashMap<>();
		channelParamters.put("capacity", "100000");
		channelParamters.put("transactionCapacity", "1000");
		Context channelContext = new Context(channelParamters);
		Configurables.configure(channel, channelContext);
		channel.setName("HbaseSinkChannel-" + UUID.randomUUID());

		sink = new HBaseSink();
		sink.setName("HbaseSink-" + UUID.randomUUID());
		Map<String, String> paramters = new HashMap<>();
		paramters.put(HBaseSinkConfigurationConstants.CONFIG_TABLE, "searchclicks");
		paramters.put(HBaseSinkConfigurationConstants.CONFIG_COLUMN_FAMILY, new String(HbaseJsonEventSerializer.COLUMFAMILY_CLIENT_BYTES));
		paramters.put(HBaseSinkConfigurationConstants.CONFIG_BATCHSIZE, "1000");
//		paramters.put(HBaseSinkConfigurationConstants.CONFIG_SERIALIZER, RegexHbaseEventSerializer.class.getName());
//		paramters.put(HBaseSinkConfigurationConstants.CONFIG_SERIALIZER + "." + RegexHbaseEventSerializer.REGEX_CONFIG, RegexHbaseEventSerializer.REGEX_DEFAULT);
//		paramters.put(HBaseSinkConfigurationConstants.CONFIG_SERIALIZER + "." + RegexHbaseEventSerializer.IGNORE_CASE_CONFIG, "true");
//		paramters.put(HBaseSinkConfigurationConstants.CONFIG_SERIALIZER + "." + RegexHbaseEventSerializer.COL_NAME_CONFIG, "json");
		paramters.put(HBaseSinkConfigurationConstants.CONFIG_SERIALIZER, HbaseJsonEventSerializer.class.getName());

		
		Context sinkContext = new Context(paramters);
		sink.configure(sinkContext);
		sink.setChannel(channel);

		sink.start();
		channel.start();
	}
 
Example 4
Source File: FlumeESSinkServiceImpl.java (from searchanalytics-bigdata, MIT License)
private void createSink() {
	sink = new ElasticSearchSink();
	sink.setName("ElasticSearchSink-" + UUID.randomUUID());
	channel = new MemoryChannel();
	Map<String, String> channelParamters = new HashMap<>();
	channelParamters.put("capacity", "100000");
	channelParamters.put("transactionCapacity", "1000");
	Context channelContext = new Context(channelParamters);
	Configurables.configure(channel, channelContext);
	channel.setName("ElasticSearchSinkChannel-" + UUID.randomUUID());

	Map<String, String> paramters = new HashMap<>();
	paramters.put(ElasticSearchSinkConstants.HOSTNAMES, "127.0.0.1:9310");
	String indexNamePrefix = "recentlyviewed";
	paramters.put(ElasticSearchSinkConstants.INDEX_NAME, indexNamePrefix);
	paramters.put(ElasticSearchSinkConstants.INDEX_TYPE, "clickevent");
	paramters.put(ElasticSearchSinkConstants.CLUSTER_NAME,
			"jai-testclusterName");
	paramters.put(ElasticSearchSinkConstants.BATCH_SIZE, "10");
	paramters.put(ElasticSearchSinkConstants.SERIALIZER,
			ElasticSearchJsonBodyEventSerializer.class.getName());

	Context sinkContext = new Context(paramters);
	sink.configure(sinkContext);
	sink.setChannel(channel);

	sink.start();
	channel.start();
}
 
Example 5
Source File: FlumeHDFSSinkServiceImpl.java (from searchanalytics-bigdata, MIT License)
private void createSink() {
	sink = new HDFSEventSink();
	sink.setName("HDFSEventSink-" + UUID.randomUUID());
	channel = new MemoryChannel();
	Map<String, String> channelParamters = new HashMap<>();
	channelParamters.put("capacity", "100000");
	channelParamters.put("transactionCapacity", "1000");
	Context channelContext = new Context(channelParamters);
	Configurables.configure(channel, channelContext);
	channel.setName("HDFSEventSinkChannel-" + UUID.randomUUID());

	Map<String, String> paramters = new HashMap<>();
	paramters.put("hdfs.type", "hdfs");
	String hdfsBasePath = hadoopClusterService.getHDFSUri()
			+ "/searchevents";
	paramters.put("hdfs.path", hdfsBasePath + "/%Y/%m/%d/%H");
	paramters.put("hdfs.filePrefix", "searchevents");
	paramters.put("hdfs.fileType", "DataStream");
	paramters.put("hdfs.rollInterval", "0");
	paramters.put("hdfs.rollSize", "0");
	paramters.put("hdfs.idleTimeout", "1");
	paramters.put("hdfs.rollCount", "0");
	paramters.put("hdfs.batchSize", "1000");
	paramters.put("hdfs.useLocalTimeStamp", "true");

	Context sinkContext = new Context(paramters);
	sink.configure(sinkContext);
	sink.setChannel(channel);

	sink.start();
	channel.start();
}
 
Example 6
Source File: TestHTTPSource.java (from mt-flume, Apache License 2.0)
@BeforeClass
public static void setUpClass() throws Exception {
  selectedPort = findFreePort();

  source = new HTTPSource();
  channel = new MemoryChannel();

  Context ctx = new Context();
  ctx.put("capacity", "100");
  Configurables.configure(channel, ctx);

  List<Channel> channels = new ArrayList<Channel>(1);
  channels.add(channel);

  ChannelSelector rcs = new ReplicatingChannelSelector();
  rcs.setChannels(channels);

  source.setChannelProcessor(new ChannelProcessor(rcs));

  channel.start();
  Context context = new Context();

  context.put("port", String.valueOf(selectedPort));
  context.put("host", "0.0.0.0");

  Configurables.configure(source, context);
  source.start();
}
 
Example 7
Source File: MongoSinkUpdateInsteadReplaceTest.java (from ingestion, Apache License 2.0)
@Before
public void prepareMongo() throws Exception {
    fongo = new Fongo("mongo test server");

    Context mongoContext = new Context();
    mongoContext.put("batchSize", "3");
    mongoContext.put("mappingFile", "/mapping_definition_update.json");
    mongoContext.put("mongoUri", "INJECTED");
    mongoContext.put("dynamic", "true");
    mongoContext.put("updateInsteadReplace", "true");

    mongoSink = new MongoSink();

    injectFongo(mongoSink);
    Configurables.configure(mongoSink, mongoContext);

    Context channelContext = new Context();
    channelContext.put("capacity", "10000");
    channelContext.put("transactionCapacity", "200");

    channel = new MemoryChannel();
    channel.setName("junitChannel");
    Configurables.configure(channel, channelContext);

    mongoSink.setChannel(channel);

    channel.start();
    mongoSink.start();

}
 
Example 8
Source File: MongoSinkDynamicTest.java (from ingestion, Apache License 2.0)
@Before
public void prepareMongo() throws Exception {
    fongo = new Fongo("mongo test server");

    Context mongoContext = new Context();
    mongoContext.put("batchSize", "3");
    mongoContext.put("mappingFile", "/mapping_definition_1.json");
    mongoContext.put("mongoUri", "INJECTED");
    mongoContext.put("dynamic", "true");

    mongoSink = new MongoSink();

    injectFongo(mongoSink);
    Configurables.configure(mongoSink, mongoContext);

    Context channelContext = new Context();
    channelContext.put("capacity", "10000");
    channelContext.put("transactionCapacity", "200");

    channel = new MemoryChannel();
    channel.setName("junitChannel");
    Configurables.configure(channel, channelContext);

    mongoSink.setChannel(channel);

    channel.start();
    mongoSink.start();
}
 
Example 9
Source File: MongoSinkTest.java (from ingestion, Apache License 2.0)
@Before
public void prepareMongo() throws Exception {
    fongo = new Fongo("mongo test server");

    Context mongoContext = new Context();
    mongoContext.put("batchSize", "1");
    mongoContext.put("mappingFile", "/mapping_definition_1.json");
    mongoContext.put("mongoUri", "INJECTED");

    mongoSink = new MongoSink();

    injectFongo(mongoSink);
    Configurables.configure(mongoSink, mongoContext);

    Context channelContext = new Context();
    channelContext.put("capacity", "10000");
    channelContext.put("transactionCapacity", "200");

    channel = new MemoryChannel();
    channel.setName("junitChannel");
    Configurables.configure(channel, channelContext);

    mongoSink.setChannel(channel);

    channel.start();
    mongoSink.start();
}
 
Example 10
Source File: FlumeAgentServiceImpl.java (from searchanalytics-bigdata, MIT License)
@SuppressWarnings("unused")
private void createAvroSourceWithLocalFileRollingSink() {
	channel = new MemoryChannel();
	String channelName = "AvroSourceMemoryChannel-" + UUID.randomUUID();
	channel.setName(channelName);

	sink = new RollingFileSink();
	sink.setName("RollingFileSink-" + UUID.randomUUID());
	Map<String, String> paramters = new HashMap<>();
	paramters.put("type", "file_roll");
	paramters.put("sink.directory", "target/flumefilelog");
	Context sinkContext = new Context(paramters);
	sink.configure(sinkContext);
	Configurables.configure(channel, sinkContext);
	sink.setChannel(channel);

	final Map<String, String> properties = new HashMap<String, String>();
	properties.put("type", "avro");
	properties.put("bind", "localhost");
	properties.put("port", "44444");
	properties.put("selector.type", "multiplexing");
	properties.put("selector.header", "State");
	properties.put("selector.mapping.VIEWED", channelName);
	properties.put("selector.mapping.default", channelName);

	avroSource = new AvroSource();
	avroSource.setName("AvroSource-" + UUID.randomUUID());
	Context sourceContext = new Context(properties);
	avroSource.configure(sourceContext);
	ChannelSelector selector = new MultiplexingChannelSelector();
	List<Channel> channels = new ArrayList<>();
	channels.add(channel);
	selector.setChannels(channels);
	final Map<String, String> selectorProperties = new HashMap<String, String>();
	properties.put("default", channelName);
	Context selectorContext = new Context(selectorProperties);
	selector.configure(selectorContext);
	ChannelProcessor cp = new ChannelProcessor(selector);
	avroSource.setChannelProcessor(cp);

	sink.start();
	channel.start();
	avroSource.start();
}
 
Example 11
Source File: TestCensoringInterceptor.java (from mt-flume, Apache License 2.0)
@Test
public void testCensor() {

  MemoryChannel memCh = new MemoryChannel();
  memCh.configure(new Context());
  memCh.start();

  ChannelSelector cs = new ReplicatingChannelSelector();
  cs.setChannels(Lists.<Channel>newArrayList(memCh));
  ChannelProcessor cp = new ChannelProcessor(cs);

  // source config
  Map<String, String> cfgMap = Maps.newHashMap();
  cfgMap.put("interceptors", "a");
  String builderClass = CensoringInterceptor.Builder.class.getName();
  cfgMap.put("interceptors.a.type", builderClass);
  Context ctx = new Context(cfgMap);

  // setup
  cp.configure(ctx);
  cp.initialize();

  Map<String, String> headers = Maps.newHashMap();
  String badWord = "scribe";
  headers.put("Bad-Words", badWord);
  Event event1 = EventBuilder.withBody("test", Charsets.UTF_8, headers);
  Assert.assertEquals(badWord, event1.getHeaders().get("Bad-Words"));
  cp.processEvent(event1);

  Transaction tx = memCh.getTransaction();
  tx.begin();

  Event event1a = memCh.take();
  Assert.assertNull(event1a.getHeaders().get("Bad-Words"));

  tx.commit();
  tx.close();

  // cleanup / shutdown
  cp.close();
  memCh.stop();
}
 
Example 12
Source File: TestMultiportSyslogTCPSource.java (from mt-flume, Apache License 2.0)
/**
 * Test that different charsets are parsed by different ports correctly.
 */
@Test
public void testPortCharsetHandling() throws UnknownHostException, Exception {

  ///////////////////////////////////////////////////////
  // port setup

  InetAddress localAddr = InetAddress.getLocalHost();
  DefaultIoSessionDataStructureFactory dsFactory =
      new DefaultIoSessionDataStructureFactory();


  // one faker on port 10001
  int port1 = 10001;
  NioSession session1 = mock(NioSession.class);
  session1.setAttributeMap(dsFactory.getAttributeMap(session1));
  SocketAddress sockAddr1 = new InetSocketAddress(localAddr, port1);
  when(session1.getLocalAddress()).thenReturn(sockAddr1);

  // another faker on port 10002
  int port2 = 10002;
  NioSession session2 = mock(NioSession.class);
  session2.setAttributeMap(dsFactory.getAttributeMap(session2));
  SocketAddress sockAddr2 = new InetSocketAddress(localAddr, port2);
  when(session2.getLocalAddress()).thenReturn(sockAddr2);

  // set up expected charsets per port
  ConcurrentMap<Integer, ThreadSafeDecoder> portCharsets =
      new ConcurrentHashMap<Integer, ThreadSafeDecoder>();
  portCharsets.put(port1, new ThreadSafeDecoder(Charsets.ISO_8859_1));
  portCharsets.put(port2, new ThreadSafeDecoder(Charsets.UTF_8));

  ///////////////////////////////////////////////////////
  // channel / source setup

  // set up channel to receive events
  MemoryChannel chan = new MemoryChannel();
  chan.configure(new Context());
  chan.start();
  ReplicatingChannelSelector sel = new ReplicatingChannelSelector();
  sel.setChannels(Lists.<Channel>newArrayList(chan));
  ChannelProcessor chanProc = new ChannelProcessor(sel);

  // defaults to UTF-8
  MultiportSyslogHandler handler = new MultiportSyslogHandler(
      1000, 10, chanProc, new SourceCounter("test"), "port",
      new ThreadSafeDecoder(Charsets.UTF_8), portCharsets);

  // initialize buffers
  handler.sessionCreated(session1);
  handler.sessionCreated(session2);

  ///////////////////////////////////////////////////////
  // event setup

  // Create events of varying charsets.
  String header = "<10>2012-08-17T02:14:00-07:00 192.168.1.110 ";

  // These chars encode under ISO-8859-1 as illegal bytes under UTF-8.
  String dangerousChars = "þÿÀÁ";

  ///////////////////////////////////////////////////////
  // encode and send them through the message handler
  String msg;
  IoBuffer buf;
  Event evt;

  // valid ISO-8859-1 on the right (ISO-8859-1) port
  msg = header + dangerousChars + "\n";
  buf = IoBuffer.wrap(msg.getBytes(Charsets.ISO_8859_1));
  handler.messageReceived(session1, buf);
  evt = takeEvent(chan);
  Assert.assertNotNull("Event vanished!", evt);
  Assert.assertNull(evt.getHeaders().get(SyslogUtils.EVENT_STATUS));

  // valid ISO-8859-1 on the wrong (UTF-8) port
  msg = header + dangerousChars + "\n";
  buf = IoBuffer.wrap(msg.getBytes(Charsets.ISO_8859_1));
  handler.messageReceived(session2, buf);
  evt = takeEvent(chan);
  Assert.assertNotNull("Event vanished!", evt);
  Assert.assertEquals("Expected invalid event due to character encoding",
      SyslogUtils.SyslogStatus.INVALID.getSyslogStatus(),
      evt.getHeaders().get(SyslogUtils.EVENT_STATUS));

  // valid UTF-8 on the right (UTF-8) port
  msg = header + dangerousChars + "\n";
  buf = IoBuffer.wrap(msg.getBytes(Charsets.UTF_8));
  handler.messageReceived(session2, buf);
  evt = takeEvent(chan);
  Assert.assertNotNull("Event vanished!", evt);
  Assert.assertNull(evt.getHeaders().get(SyslogUtils.EVENT_STATUS));
}
 
Example 13
Source File: TestHDFSEventSinkOnMiniCluster.java (from mt-flume, Apache License 2.0)
/**
 * This is a very basic test that writes one event to HDFS and reads it back.
 */
@Test
public void simpleHDFSTest() throws EventDeliveryException, IOException {
  cluster = new MiniDFSCluster(new Configuration(), 1, true, null);
  cluster.waitActive();

  String outputDir = "/flume/simpleHDFSTest";
  Path outputDirPath = new Path(outputDir);

  logger.info("Running test with output dir: {}", outputDir);

  FileSystem fs = cluster.getFileSystem();
  // ensure output directory is empty
  if (fs.exists(outputDirPath)) {
    fs.delete(outputDirPath, true);
  }

  String nnURL = getNameNodeURL(cluster);
  logger.info("Namenode address: {}", nnURL);

  Context chanCtx = new Context();
  MemoryChannel channel = new MemoryChannel();
  channel.setName("simpleHDFSTest-mem-chan");
  channel.configure(chanCtx);
  channel.start();

  Context sinkCtx = new Context();
  sinkCtx.put("hdfs.path", nnURL + outputDir);
  sinkCtx.put("hdfs.fileType", HDFSWriterFactory.DataStreamType);
  sinkCtx.put("hdfs.batchSize", Integer.toString(1));

  HDFSEventSink sink = new HDFSEventSink();
  sink.setName("simpleHDFSTest-hdfs-sink");
  sink.configure(sinkCtx);
  sink.setChannel(channel);
  sink.start();

  // create an event
  String EVENT_BODY = "yarg!";
  channel.getTransaction().begin();
  try {
    channel.put(EventBuilder.withBody(EVENT_BODY, Charsets.UTF_8));
    channel.getTransaction().commit();
  } finally {
    channel.getTransaction().close();
  }

  // store event to HDFS
  sink.process();

  // shut down flume
  sink.stop();
  channel.stop();

  // verify that it's in HDFS and that its content is what we say it should be
  FileStatus[] statuses = fs.listStatus(outputDirPath);
  Assert.assertNotNull("No files found written to HDFS", statuses);
  Assert.assertEquals("Only one file expected", 1, statuses.length);

  for (FileStatus status : statuses) {
    Path filePath = status.getPath();
    logger.info("Found file on DFS: {}", filePath);
    FSDataInputStream stream = fs.open(filePath);
    BufferedReader reader = new BufferedReader(new InputStreamReader(stream));
    String line = reader.readLine();
    logger.info("First line in file {}: {}", filePath, line);
    Assert.assertEquals(EVENT_BODY, line);
  }

  if (!KEEP_DATA) {
    fs.delete(outputDirPath, true);
  }

  cluster.shutdown();
  cluster = null;
}
 
Example 14
Source File: TestHDFSEventSinkOnMiniCluster.java (from mt-flume, Apache License 2.0)
/**
 * Writes two events in GZIP-compressed format.
 */
@Test
public void simpleHDFSGZipCompressedTest() throws EventDeliveryException, IOException {
  cluster = new MiniDFSCluster(new Configuration(), 1, true, null);
  cluster.waitActive();

  String outputDir = "/flume/simpleHDFSGZipCompressedTest";
  Path outputDirPath = new Path(outputDir);

  logger.info("Running test with output dir: {}", outputDir);

  FileSystem fs = cluster.getFileSystem();
  // ensure output directory is empty
  if (fs.exists(outputDirPath)) {
    fs.delete(outputDirPath, true);
  }

  String nnURL = getNameNodeURL(cluster);
  logger.info("Namenode address: {}", nnURL);

  Context chanCtx = new Context();
  MemoryChannel channel = new MemoryChannel();
  channel.setName("simpleHDFSTest-mem-chan");
  channel.configure(chanCtx);
  channel.start();

  Context sinkCtx = new Context();
  sinkCtx.put("hdfs.path", nnURL + outputDir);
  sinkCtx.put("hdfs.fileType", HDFSWriterFactory.CompStreamType);
  sinkCtx.put("hdfs.batchSize", Integer.toString(1));
  sinkCtx.put("hdfs.codeC", "gzip");

  HDFSEventSink sink = new HDFSEventSink();
  sink.setName("simpleHDFSTest-hdfs-sink");
  sink.configure(sinkCtx);
  sink.setChannel(channel);
  sink.start();

  // create an event
  String EVENT_BODY_1 = "yarg1";
  String EVENT_BODY_2 = "yarg2";
  channel.getTransaction().begin();
  try {
    channel.put(EventBuilder.withBody(EVENT_BODY_1, Charsets.UTF_8));
    channel.put(EventBuilder.withBody(EVENT_BODY_2, Charsets.UTF_8));
    channel.getTransaction().commit();
  } finally {
    channel.getTransaction().close();
  }

  // store event to HDFS
  sink.process();

  // shut down flume
  sink.stop();
  channel.stop();

  // verify that it's in HDFS and that its content is what we say it should be
  FileStatus[] statuses = fs.listStatus(outputDirPath);
  Assert.assertNotNull("No files found written to HDFS", statuses);
  Assert.assertEquals("Only one file expected", 1, statuses.length);

  for (FileStatus status : statuses) {
    Path filePath = status.getPath();
    logger.info("Found file on DFS: {}", filePath);
    FSDataInputStream stream = fs.open(filePath);
    BufferedReader reader = new BufferedReader(new InputStreamReader(
        new GZIPInputStream(stream)));
    String line = reader.readLine();
    logger.info("First line in file {}: {}", filePath, line);
    Assert.assertEquals(EVENT_BODY_1, line);

    // The rest of this test is commented-out (will fail) for 2 reasons:
    //
    // (1) At the time of this writing, Hadoop has a bug which causes the
    // non-native gzip implementation to create invalid gzip files when
    // finish() and resetState() are called. See HADOOP-8522.
    //
    // (2) Even if HADOOP-8522 is fixed, the JDK GZipInputStream is unable
    // to read multi-member (concatenated) gzip files. See this Sun bug:
    // http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4691425
    //
    //line = reader.readLine();
    //logger.info("Second line in file {}: {}", filePath, line);
    //Assert.assertEquals(EVENT_BODY_2, line);
  }

  if (!KEEP_DATA) {
    fs.delete(outputDirPath, true);
  }

  cluster.shutdown();
  cluster = null;
}
 
Example 15
Source File: KafkaSinkTestIT.java (from ingestion, Apache License 2.0)
@Before
public void setUp() {

    conf= ConfigFactory.load();

    ZOOKEEPER_HOSTS = StringUtils.join(conf.getStringList("zookeeper.hosts"), ",");
    KAFKA_HOSTS = conf.getStringList("kafka.hosts");

    LOGGER.info("Using Zookeeper hosts: " + ZOOKEEPER_HOSTS);
    LOGGER.info("Using Zookeeper hosts: " + KAFKA_HOSTS);

    String[] connection = KAFKA_HOSTS.get(0).split(":");

    simpleConsumer = new SimpleConsumer(connection[0], Integer.parseInt(connection[1]), 60000, 1024, CLIENT_ID);

    kafkaSink = new KafkaSink();

    Context kafkaContext = new Context();
    kafkaContext.put("topic", "test");
    kafkaContext.put("writeBody", "false");
    kafkaContext.put("kafka.metadata.broker.list", StringUtils.join(KAFKA_HOSTS, ","));
    kafkaContext.put("kafka.serializer.class", "kafka.serializer.StringEncoder");

    Configurables.configure(kafkaSink, kafkaContext);

    Context channelContext = new Context();
    channelContext.put("capacity", "10000");
    channelContext.put("transactionCapacity", "200");

    channel = new MemoryChannel();
    channel.setName("junitChannel");
    Configurables.configure(channel, channelContext);

    kafkaSink.setChannel(channel);

    channel.start();
    kafkaSink.start();

}