Java Code Examples for org.apache.flume.channel.MemoryChannel#setName()
The following examples show how to use org.apache.flume.channel.MemoryChannel#setName().
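All of the examples follow the same basic pattern: construct a MemoryChannel, give it a name with setName(), configure it through a Context, and start it before wiring it to a sink. The snippet below is a minimal sketch of that pattern only; the class name, channel name, and capacity values are illustrative assumptions rather than code from any of the listed projects.

import java.util.HashMap;
import java.util.Map;

import org.apache.flume.Context;
import org.apache.flume.channel.MemoryChannel;
import org.apache.flume.conf.Configurables;

public class MemoryChannelNameSketch {
    public static void main(String[] args) {
        // Create the in-memory channel and give it an explicit, human-readable name.
        MemoryChannel channel = new MemoryChannel();
        channel.setName("example-memory-channel"); // illustrative name, not from the examples below

        // Basic sizing parameters; the values are placeholders.
        Map<String, String> params = new HashMap<>();
        params.put("capacity", "10000");
        params.put("transactionCapacity", "1000");
        Configurables.configure(channel, new Context(params));

        channel.start();
        // ... attach the channel to a source and/or sink here ...
        channel.stop();
    }
}

In the examples that follow, the explicit name mainly makes the channel easier to identify in logs and monitoring output; many of the examples append a random UUID to keep the name unique.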
Example 1
Source File: FlumeAgentServiceImpl.java From searchanalytics-bigdata with MIT License | 6 votes |
private void createSparkAvroSink() {
    sparkAvroChannel = new MemoryChannel();
    Map<String, String> channelParamters = new HashMap<>();
    channelParamters.put("capacity", "100000");
    channelParamters.put("transactionCapacity", "1000");
    Context channelContext = new Context(channelParamters);
    Configurables.configure(sparkAvroChannel, channelContext);
    String channelName = "SparkAvroMemoryChannel-" + UUID.randomUUID();
    sparkAvroChannel.setName(channelName);

    sparkAvroSink = new AvroSink();
    sparkAvroSink.setName("SparkAvroSink-" + UUID.randomUUID());
    Map<String, String> paramters = new HashMap<>();
    paramters.put("type", "avro");
    paramters.put("hostname", "localhost");
    paramters.put("port", "41111");
    paramters.put("batch-size", "100");
    Context sinkContext = new Context(paramters);
    sparkAvroSink.configure(sinkContext);
    Configurables.configure(sparkAvroSink, sinkContext);
    sparkAvroSink.setChannel(sparkAvroChannel);

    sparkAvroChannel.start();
    sparkAvroSink.start();
}
Example 2
Source File: DruidSinkIT.java From ingestion with Apache License 2.0 | 6 votes |
@Before
public void setup() {
    // Context channelContext = new Context();
    // channelContext.put("checkpointDir","data/check");
    // channelContext.put("dataDirs","data/data");
    // channelContext.put("capacity","1000");
    // channelContext.put("transactionCapacity","100");
    // channelContext.put("checkpointInterval","300");
    // channel = new FileChannel();
    Context channelContext = new Context();
    channelContext.put("capacity", "10000");
    channelContext.put("transactionCapacity", "5000");
    channel = new MemoryChannel();
    channel.setName("junitChannel");
    Configurables.configure(channel, channelContext);
    channel.start();

    druidSink = new DruidSink();
    druidSink.setChannel(channel);
    druidSink.configure(getMockContext());
    druidSink.start();
}
Example 3
Source File: FlumeHbaseSinkServiceImpl.java From searchanalytics-bigdata with MIT License | 5 votes |
private void createSink() {
    channel = new MemoryChannel();
    Map<String, String> channelParamters = new HashMap<>();
    channelParamters.put("capacity", "100000");
    channelParamters.put("transactionCapacity", "1000");
    Context channelContext = new Context(channelParamters);
    Configurables.configure(channel, channelContext);
    channel.setName("HbaseSinkChannel-" + UUID.randomUUID());

    sink = new HBaseSink();
    sink.setName("HbaseSink-" + UUID.randomUUID());
    Map<String, String> paramters = new HashMap<>();
    paramters.put(HBaseSinkConfigurationConstants.CONFIG_TABLE, "searchclicks");
    paramters.put(HBaseSinkConfigurationConstants.CONFIG_COLUMN_FAMILY,
            new String(HbaseJsonEventSerializer.COLUMFAMILY_CLIENT_BYTES));
    paramters.put(HBaseSinkConfigurationConstants.CONFIG_BATCHSIZE, "1000");
    // paramters.put(HBaseSinkConfigurationConstants.CONFIG_SERIALIZER, RegexHbaseEventSerializer.class.getName());
    // paramters.put(HBaseSinkConfigurationConstants.CONFIG_SERIALIZER + "." + RegexHbaseEventSerializer.REGEX_CONFIG, RegexHbaseEventSerializer.REGEX_DEFAULT);
    // paramters.put(HBaseSinkConfigurationConstants.CONFIG_SERIALIZER + "." + RegexHbaseEventSerializer.IGNORE_CASE_CONFIG, "true");
    // paramters.put(HBaseSinkConfigurationConstants.CONFIG_SERIALIZER + "." + RegexHbaseEventSerializer.COL_NAME_CONFIG, "json");
    paramters.put(HBaseSinkConfigurationConstants.CONFIG_SERIALIZER,
            HbaseJsonEventSerializer.class.getName());
    Context sinkContext = new Context(paramters);
    sink.configure(sinkContext);
    sink.setChannel(channel);

    sink.start();
    channel.start();
}
Example 4
Source File: FlumeESSinkServiceImpl.java From searchanalytics-bigdata with MIT License | 5 votes |
private void createSink() {
    sink = new ElasticSearchSink();
    sink.setName("ElasticSearchSink-" + UUID.randomUUID());
    channel = new MemoryChannel();
    Map<String, String> channelParamters = new HashMap<>();
    channelParamters.put("capacity", "100000");
    channelParamters.put("transactionCapacity", "1000");
    Context channelContext = new Context(channelParamters);
    Configurables.configure(channel, channelContext);
    channel.setName("ElasticSearchSinkChannel-" + UUID.randomUUID());

    Map<String, String> paramters = new HashMap<>();
    paramters.put(ElasticSearchSinkConstants.HOSTNAMES, "127.0.0.1:9310");
    String indexNamePrefix = "recentlyviewed";
    paramters.put(ElasticSearchSinkConstants.INDEX_NAME, indexNamePrefix);
    paramters.put(ElasticSearchSinkConstants.INDEX_TYPE, "clickevent");
    paramters.put(ElasticSearchSinkConstants.CLUSTER_NAME, "jai-testclusterName");
    paramters.put(ElasticSearchSinkConstants.BATCH_SIZE, "10");
    paramters.put(ElasticSearchSinkConstants.SERIALIZER,
            ElasticSearchJsonBodyEventSerializer.class.getName());
    Context sinkContext = new Context(paramters);
    sink.configure(sinkContext);
    sink.setChannel(channel);

    sink.start();
    channel.start();
}
Example 5
Source File: FlumeHDFSSinkServiceImpl.java From searchanalytics-bigdata with MIT License | 5 votes |
private void createSink() {
    sink = new HDFSEventSink();
    sink.setName("HDFSEventSink-" + UUID.randomUUID());
    channel = new MemoryChannel();
    Map<String, String> channelParamters = new HashMap<>();
    channelParamters.put("capacity", "100000");
    channelParamters.put("transactionCapacity", "1000");
    Context channelContext = new Context(channelParamters);
    Configurables.configure(channel, channelContext);
    channel.setName("HDFSEventSinkChannel-" + UUID.randomUUID());

    Map<String, String> paramters = new HashMap<>();
    paramters.put("hdfs.type", "hdfs");
    String hdfsBasePath = hadoopClusterService.getHDFSUri() + "/searchevents";
    paramters.put("hdfs.path", hdfsBasePath + "/%Y/%m/%d/%H");
    paramters.put("hdfs.filePrefix", "searchevents");
    paramters.put("hdfs.fileType", "DataStream");
    paramters.put("hdfs.rollInterval", "0");
    paramters.put("hdfs.rollSize", "0");
    paramters.put("hdfs.idleTimeout", "1");
    paramters.put("hdfs.rollCount", "0");
    paramters.put("hdfs.batchSize", "1000");
    paramters.put("hdfs.useLocalTimeStamp", "true");
    Context sinkContext = new Context(paramters);
    sink.configure(sinkContext);
    sink.setChannel(channel);

    sink.start();
    channel.start();
}
Example 6
Source File: MongoSinkUpdateInsteadReplaceTest.java From ingestion with Apache License 2.0 | 5 votes |
@Before
public void prepareMongo() throws Exception {
    fongo = new Fongo("mongo test server");

    Context mongoContext = new Context();
    mongoContext.put("batchSize", "3");
    mongoContext.put("mappingFile", "/mapping_definition_update.json");
    mongoContext.put("mongoUri", "INJECTED");
    mongoContext.put("dynamic", "true");
    mongoContext.put("updateInsteadReplace", "true");

    mongoSink = new MongoSink();
    injectFongo(mongoSink);
    Configurables.configure(mongoSink, mongoContext);

    Context channelContext = new Context();
    channelContext.put("capacity", "10000");
    channelContext.put("transactionCapacity", "200");
    channel = new MemoryChannel();
    channel.setName("junitChannel");
    Configurables.configure(channel, channelContext);

    mongoSink.setChannel(channel);
    channel.start();
    mongoSink.start();
}
Example 7
Source File: MongoSinkDynamicTest.java From ingestion with Apache License 2.0 | 5 votes |
@Before
public void prepareMongo() throws Exception {
    fongo = new Fongo("mongo test server");

    Context mongoContext = new Context();
    mongoContext.put("batchSize", "3");
    mongoContext.put("mappingFile", "/mapping_definition_1.json");
    mongoContext.put("mongoUri", "INJECTED");
    mongoContext.put("dynamic", "true");

    mongoSink = new MongoSink();
    injectFongo(mongoSink);
    Configurables.configure(mongoSink, mongoContext);

    Context channelContext = new Context();
    channelContext.put("capacity", "10000");
    channelContext.put("transactionCapacity", "200");
    channel = new MemoryChannel();
    channel.setName("junitChannel");
    Configurables.configure(channel, channelContext);

    mongoSink.setChannel(channel);
    channel.start();
    mongoSink.start();
}
Example 8
Source File: MongoSinkTest.java From ingestion with Apache License 2.0 | 5 votes |
@Before
public void prepareMongo() throws Exception {
    fongo = new Fongo("mongo test server");

    Context mongoContext = new Context();
    mongoContext.put("batchSize", "1");
    mongoContext.put("mappingFile", "/mapping_definition_1.json");
    mongoContext.put("mongoUri", "INJECTED");

    mongoSink = new MongoSink();
    injectFongo(mongoSink);
    Configurables.configure(mongoSink, mongoContext);

    Context channelContext = new Context();
    channelContext.put("capacity", "10000");
    channelContext.put("transactionCapacity", "200");
    channel = new MemoryChannel();
    channel.setName("junitChannel");
    Configurables.configure(channel, channelContext);

    mongoSink.setChannel(channel);
    channel.start();
    mongoSink.start();
}
Example 9
Source File: CassandraSinkIT.java From ingestion with Apache License 2.0 | 5 votes |
private void _do() throws TTransportException, IOException, InterruptedException {
    final Context context = new Context();
    final InetSocketAddress contactPoint = CassandraTestHelper.getCassandraContactPoint();
    context.put("tables", "keyspaceTestCassandraSinkIT.tableTestCassandraSinkIT");
    context.put("hosts", contactPoint.getAddress().getHostAddress());
    context.put("batchSize", "1");
    context.put("consistency", "QUORUM");

    final File cqlFile = File.createTempFile("flumeTest", "cql");
    cqlFile.deleteOnExit();
    IOUtils.write(
            "CREATE KEYSPACE IF NOT EXISTS keyspaceTestCassandraSinkIT WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };\n\n"
                    + "CREATE TABLE IF NOT EXISTS keyspaceTestCassandraSinkIT.tableTestCassandraSinkIT ("
                    + "id uuid, bool_field boolean, int_field int, PRIMARY KEY (int_field)"
                    + ");\n\n",
            new FileOutputStream(cqlFile));
    context.put("cqlFile", cqlFile.getAbsolutePath());

    sink = new CassandraSink();
    sink.configure(context);

    Context channelContext = new Context();
    channelContext.put("capacity", "10000");
    channelContext.put("transactionCapacity", "200");
    channel = new MemoryChannel();
    channel.setName("junitChannel");
    Configurables.configure(channel, channelContext);
    sink.setChannel(channel);

    sink.start();
    sink.stop();
}
Example 10
Source File: FlumeAgentServiceImpl.java From searchanalytics-bigdata with MIT License | 4 votes |
@SuppressWarnings("unused")
private void createAvroSourceWithLocalFileRollingSink() {
    channel = new MemoryChannel();
    String channelName = "AvroSourceMemoryChannel-" + UUID.randomUUID();
    channel.setName(channelName);

    sink = new RollingFileSink();
    sink.setName("RollingFileSink-" + UUID.randomUUID());
    Map<String, String> paramters = new HashMap<>();
    paramters.put("type", "file_roll");
    paramters.put("sink.directory", "target/flumefilelog");
    Context sinkContext = new Context(paramters);
    sink.configure(sinkContext);
    Configurables.configure(channel, sinkContext);
    sink.setChannel(channel);

    final Map<String, String> properties = new HashMap<String, String>();
    properties.put("type", "avro");
    properties.put("bind", "localhost");
    properties.put("port", "44444");
    properties.put("selector.type", "multiplexing");
    properties.put("selector.header", "State");
    properties.put("selector.mapping.VIEWED", channelName);
    properties.put("selector.mapping.default", channelName);

    avroSource = new AvroSource();
    avroSource.setName("AvroSource-" + UUID.randomUUID());
    Context sourceContext = new Context(properties);
    avroSource.configure(sourceContext);

    ChannelSelector selector = new MultiplexingChannelSelector();
    List<Channel> channels = new ArrayList<>();
    channels.add(channel);
    selector.setChannels(channels);
    final Map<String, String> selectorProperties = new HashMap<String, String>();
    // note: the original code puts "default" into "properties"; "selectorProperties"
    // (used for selectorContext below) may have been intended
    properties.put("default", channelName);
    Context selectorContext = new Context(selectorProperties);
    selector.configure(selectorContext);
    ChannelProcessor cp = new ChannelProcessor(selector);
    avroSource.setChannelProcessor(cp);

    sink.start();
    channel.start();
    avroSource.start();
}
Example 11
Source File: TestHDFSEventSinkOnMiniCluster.java From mt-flume with Apache License 2.0 | 4 votes |
/**
 * This is a very basic test that writes one event to HDFS and reads it back.
 */
@Test
public void simpleHDFSTest() throws EventDeliveryException, IOException {
    cluster = new MiniDFSCluster(new Configuration(), 1, true, null);
    cluster.waitActive();

    String outputDir = "/flume/simpleHDFSTest";
    Path outputDirPath = new Path(outputDir);

    logger.info("Running test with output dir: {}", outputDir);

    FileSystem fs = cluster.getFileSystem();
    // ensure output directory is empty
    if (fs.exists(outputDirPath)) {
        fs.delete(outputDirPath, true);
    }

    String nnURL = getNameNodeURL(cluster);
    logger.info("Namenode address: {}", nnURL);

    Context chanCtx = new Context();
    MemoryChannel channel = new MemoryChannel();
    channel.setName("simpleHDFSTest-mem-chan");
    channel.configure(chanCtx);
    channel.start();

    Context sinkCtx = new Context();
    sinkCtx.put("hdfs.path", nnURL + outputDir);
    sinkCtx.put("hdfs.fileType", HDFSWriterFactory.DataStreamType);
    sinkCtx.put("hdfs.batchSize", Integer.toString(1));

    HDFSEventSink sink = new HDFSEventSink();
    sink.setName("simpleHDFSTest-hdfs-sink");
    sink.configure(sinkCtx);
    sink.setChannel(channel);
    sink.start();

    // create an event
    String EVENT_BODY = "yarg!";
    channel.getTransaction().begin();
    try {
        channel.put(EventBuilder.withBody(EVENT_BODY, Charsets.UTF_8));
        channel.getTransaction().commit();
    } finally {
        channel.getTransaction().close();
    }

    // store event to HDFS
    sink.process();

    // shut down flume
    sink.stop();
    channel.stop();

    // verify that it's in HDFS and that its content is what we say it should be
    FileStatus[] statuses = fs.listStatus(outputDirPath);
    Assert.assertNotNull("No files found written to HDFS", statuses);
    Assert.assertEquals("Only one file expected", 1, statuses.length);

    for (FileStatus status : statuses) {
        Path filePath = status.getPath();
        logger.info("Found file on DFS: {}", filePath);
        FSDataInputStream stream = fs.open(filePath);
        BufferedReader reader = new BufferedReader(new InputStreamReader(stream));
        String line = reader.readLine();
        logger.info("First line in file {}: {}", filePath, line);
        Assert.assertEquals(EVENT_BODY, line);
    }

    if (!KEEP_DATA) {
        fs.delete(outputDirPath, true);
    }

    cluster.shutdown();
    cluster = null;
}
Example 12
Source File: TestHDFSEventSinkOnMiniCluster.java From mt-flume with Apache License 2.0 | 4 votes |
/**
 * Writes two events in GZIP-compressed format.
 */
@Test
public void simpleHDFSGZipCompressedTest() throws EventDeliveryException, IOException {
    cluster = new MiniDFSCluster(new Configuration(), 1, true, null);
    cluster.waitActive();

    String outputDir = "/flume/simpleHDFSGZipCompressedTest";
    Path outputDirPath = new Path(outputDir);

    logger.info("Running test with output dir: {}", outputDir);

    FileSystem fs = cluster.getFileSystem();
    // ensure output directory is empty
    if (fs.exists(outputDirPath)) {
        fs.delete(outputDirPath, true);
    }

    String nnURL = getNameNodeURL(cluster);
    logger.info("Namenode address: {}", nnURL);

    Context chanCtx = new Context();
    MemoryChannel channel = new MemoryChannel();
    channel.setName("simpleHDFSTest-mem-chan");
    channel.configure(chanCtx);
    channel.start();

    Context sinkCtx = new Context();
    sinkCtx.put("hdfs.path", nnURL + outputDir);
    sinkCtx.put("hdfs.fileType", HDFSWriterFactory.CompStreamType);
    sinkCtx.put("hdfs.batchSize", Integer.toString(1));
    sinkCtx.put("hdfs.codeC", "gzip");

    HDFSEventSink sink = new HDFSEventSink();
    sink.setName("simpleHDFSTest-hdfs-sink");
    sink.configure(sinkCtx);
    sink.setChannel(channel);
    sink.start();

    // create an event
    String EVENT_BODY_1 = "yarg1";
    String EVENT_BODY_2 = "yarg2";
    channel.getTransaction().begin();
    try {
        channel.put(EventBuilder.withBody(EVENT_BODY_1, Charsets.UTF_8));
        channel.put(EventBuilder.withBody(EVENT_BODY_2, Charsets.UTF_8));
        channel.getTransaction().commit();
    } finally {
        channel.getTransaction().close();
    }

    // store event to HDFS
    sink.process();

    // shut down flume
    sink.stop();
    channel.stop();

    // verify that it's in HDFS and that its content is what we say it should be
    FileStatus[] statuses = fs.listStatus(outputDirPath);
    Assert.assertNotNull("No files found written to HDFS", statuses);
    Assert.assertEquals("Only one file expected", 1, statuses.length);

    for (FileStatus status : statuses) {
        Path filePath = status.getPath();
        logger.info("Found file on DFS: {}", filePath);
        FSDataInputStream stream = fs.open(filePath);
        BufferedReader reader = new BufferedReader(new InputStreamReader(
                new GZIPInputStream(stream)));
        String line = reader.readLine();
        logger.info("First line in file {}: {}", filePath, line);
        Assert.assertEquals(EVENT_BODY_1, line);

        // The rest of this test is commented-out (will fail) for 2 reasons:
        //
        // (1) At the time of this writing, Hadoop has a bug which causes the
        // non-native gzip implementation to create invalid gzip files when
        // finish() and resetState() are called. See HADOOP-8522.
        //
        // (2) Even if HADOOP-8522 is fixed, the JDK GZipInputStream is unable
        // to read multi-member (concatenated) gzip files. See this Sun bug:
        // http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4691425
        //
        //line = reader.readLine();
        //logger.info("Second line in file {}: {}", filePath, line);
        //Assert.assertEquals(EVENT_BODY_2, line);
    }

    if (!KEEP_DATA) {
        fs.delete(outputDirPath, true);
    }

    cluster.shutdown();
    cluster = null;
}
Example 13
Source File: CassandraDataTypesIT.java From ingestion with Apache License 2.0 | 4 votes |
@Before
public void setup() throws TTransportException, IOException, InterruptedException {
    final Context context = new Context();
    final InetSocketAddress contactPoint = CassandraTestHelper.getCassandraContactPoint();
    context.put("tables", KEYSPACE + "." + TABLE);
    context.put("hosts", contactPoint.getAddress().getHostAddress());
    context.put("batchSize", "1");
    context.put("consistency", "QUORUM");

    Cluster cluster = Cluster.builder()
            .addContactPointsWithPorts(Collections.singletonList(contactPoint))
            .build();
    Session session = cluster.connect();
    session.execute(
            "CREATE KEYSPACE IF NOT EXISTS keyspaceTest WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };");
    session.execute("CREATE TABLE if not exists keyspaceTest.tableTest ("
            + PRIMARY_KEY + " uuid, "
            + TEXT_FIELD + " text, "
            + VARCHAR_FIELD + " varchar, "
            + VARINT_FIELD + " varint, "
            + ASCII_FIELD + " ascii, "
            + BOOLEAN_FIELD + " boolean, "
            + DECIMAL_FIELD + " decimal, "
            + DOUBLE_FIELD + " double, "
            + FLOAT_FIELD + " float, "
            + INET_FIELD + " inet, "
            + INT_FIELD + " int, "
            + LIST_FIELD + " list<TEXT>, "
            + MAP_FIELD + " map<TEXT,INT>, "
            + SET_FIELD + " set<TEXT>, "
            + TIMESTAMP_FIELD + " timestamp, "
            + UUID_FIELD + " uuid, "
            + BIGINT_FIELD + " bigint, PRIMARY KEY (" + PRIMARY_KEY + "));");
    session.close();
    cluster.close();

    sink = new CassandraSink();
    sink.configure(context);

    Context channelContext = new Context();
    channelContext.put("capacity", "10000");
    channelContext.put("transactionCapacity", "200");
    channel = new MemoryChannel();
    channel.setName("junitChannel");
    Configurables.configure(channel, channelContext);
    sink.setChannel(channel);
    sink.start();

    headers = new HashMap<String, String>();
    headers.put(PRIMARY_KEY, UUID.randomUUID().toString());
}
Example 14
Source File: KafkaSinkTestIT.java From ingestion with Apache License 2.0 | 3 votes |
@Before
public void setUp() {
    conf = ConfigFactory.load();
    ZOOKEEPER_HOSTS = StringUtils.join(conf.getStringList("zookeeper.hosts"), ",");
    KAFKA_HOSTS = conf.getStringList("kafka.hosts");

    LOGGER.info("Using Zookeeper hosts: " + ZOOKEEPER_HOSTS);
    LOGGER.info("Using Zookeeper hosts: " + KAFKA_HOSTS);

    String[] connection = KAFKA_HOSTS.get(0).split(":");
    simpleConsumer = new SimpleConsumer(connection[0], Integer.parseInt(connection[1]), 60000, 1024, CLIENT_ID);

    kafkaSink = new KafkaSink();
    Context kafkaContext = new Context();
    kafkaContext.put("topic", "test");
    kafkaContext.put("writeBody", "false");
    kafkaContext.put("kafka.metadata.broker.list", StringUtils.join(KAFKA_HOSTS, ","));
    kafkaContext.put("kafka.serializer.class", "kafka.serializer.StringEncoder");
    Configurables.configure(kafkaSink, kafkaContext);

    Context channelContext = new Context();
    channelContext.put("capacity", "10000");
    channelContext.put("transactionCapacity", "200");
    channel = new MemoryChannel();
    channel.setName("junitChannel");
    Configurables.configure(channel, channelContext);

    kafkaSink.setChannel(channel);
    channel.start();
    kafkaSink.start();
}