Java Code Examples for org.apache.flume.Context#put()
The following examples show how to use org.apache.flume.Context#put().
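A Context is, at bottom, a map of String keys to String values: put() stores a configuration property, and a Flume component reads it back when it is configured. The following minimal sketch shows that round trip (the MemoryChannel properties and the Configurables.configure() call mirror the test code in the examples below; the wrapper class and main method are only for illustration):

import org.apache.flume.Channel;
import org.apache.flume.Context;
import org.apache.flume.channel.MemoryChannel;
import org.apache.flume.conf.Configurables;

public class ContextPutSketch {
  public static void main(String[] args) {
    // A Context holds component settings as string key/value pairs.
    Context context = new Context();
    context.put("capacity", "10000");          // values are always Strings,
    context.put("transactionCapacity", "200"); // even for numeric settings

    // The component parses its settings out of the Context when configured.
    Channel channel = new MemoryChannel();
    Configurables.configure(channel, context);
  }
}

Note that every value is passed as a String; each component is responsible for parsing its own settings.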
Example 1
Source File: TestMorphlineInterceptor.java from mt-flume with Apache License 2.0
@Test
public void testNoOperation() throws Exception {
  Context context = new Context();
  context.put(MorphlineHandlerImpl.MORPHLINE_FILE_PARAM,
      RESOURCES_DIR + "/test-morphlines/noOperation.conf");
  Event input = EventBuilder.withBody("foo", Charsets.UTF_8);
  input.getHeaders().put("name", "nadja");

  MorphlineInterceptor interceptor = build(context);
  Event actual = interceptor.intercept(input);
  interceptor.close();

  Event expected = EventBuilder.withBody("foo".getBytes("UTF-8"),
      ImmutableMap.of("name", "nadja"));
  assertEqualsEvent(expected, actual);

  List<Event> actualList = build(context).intercept(Collections.singletonList(input));
  List<Event> expectedList = Collections.singletonList(expected);
  assertEqualsEventList(expectedList, actualList);
}
Example 2
Source File: TestRegexEventSerializer.java from phoenix with BSD 3-Clause "New" or "Revised" License
private void initSinkContextWithDefaults(final String fullTableName) {
  Preconditions.checkNotNull(fullTableName);
  sinkContext = new Context();

  String ddl = "CREATE TABLE " + fullTableName
      + " (flume_time timestamp not null, col1 varchar , col2 varchar"
      + " CONSTRAINT pk PRIMARY KEY (flume_time))\n";

  sinkContext.put(FlumeConstants.CONFIG_TABLE, fullTableName);
  sinkContext.put(FlumeConstants.CONFIG_JDBC_URL, TestUtil.PHOENIX_JDBC_URL);
  sinkContext.put(FlumeConstants.CONFIG_SERIALIZER, EventSerializers.REGEX.name());
  sinkContext.put(FlumeConstants.CONFIG_TABLE_DDL, ddl);
  sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_REGULAR_EXPRESSION,
      "^([^\t]+)\t([^\t]+)$");
  sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_COLUMN_NAMES,
      "col1,col2");
  sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_ROWKEY_TYPE_GENERATOR,
      DefaultKeyGenerator.TIMESTAMP.name());
}
Example 3
Source File: TestLog4jAppender.java from mt-flume with Apache License 2.0
@Before
public void initiate() throws Exception {
  int port = 25430;
  source = new AvroSource();
  ch = new MemoryChannel();
  Configurables.configure(ch, new Context());

  Context context = new Context();
  context.put("port", String.valueOf(port));
  context.put("bind", "localhost");
  Configurables.configure(source, context);

  File TESTFILE = new File(
      TestLog4jAppender.class.getClassLoader()
          .getResource("flume-log4jtest.properties").getFile());
  FileReader reader = new FileReader(TESTFILE);
  props = new Properties();
  props.load(reader);
  reader.close();
}
Example 4
Source File: NGSIPostgreSQLSinkTest.java from fiware-cygnus with GNU Affero General Public License v3.0
private Context createContext(String attrPersistence, String batchSize, String batchTime,
    String batchTTL, String dataModel, String enableEncoding, String enableGrouping,
    String enableLowercase, String host, String password, String port, String username,
    String cache, String sqlOptions) {
  Context context = new Context();
  context.put("attr_persistence", attrPersistence);
  context.put("batch_size", batchSize);
  context.put("batch_time", batchTime);
  context.put("batch_ttl", batchTTL);
  context.put("data_model", dataModel);
  context.put("enable_encoding", enableEncoding);
  context.put("enable_grouping", enableGrouping);
  context.put("enable_lowercase", enableLowercase);
  context.put("postgresql_host", host);
  context.put("postgresql_password", password);
  context.put("postgresql_port", port);
  context.put("postgresql_username", username);
  context.put("backend.enable_cache", cache);
  context.put("postgresql_options", sqlOptions);
  return context;
}
Example 5
Source File: TestTimestampInterceptor.java from mt-flume with Apache License 2.0
/**
 * Ensure timestamp IS overwritten when preserveExistingTimestamp == false
 */
@Test
public void testClobber() throws ClassNotFoundException,
    InstantiationException, IllegalAccessException {
  Context ctx = new Context();
  ctx.put("preserveExisting", "false"); // DEFAULT BEHAVIOR

  InterceptorBuilderFactory factory = new InterceptorBuilderFactory();
  Interceptor.Builder builder = InterceptorBuilderFactory.newInstance(
      InterceptorType.TIMESTAMP.toString());
  builder.configure(ctx);
  Interceptor interceptor = builder.build();

  long originalTs = 1L;
  Event event = EventBuilder.withBody("test event", Charsets.UTF_8);
  event.getHeaders().put(Constants.TIMESTAMP, Long.toString(originalTs));
  Assert.assertEquals(Long.toString(originalTs),
      event.getHeaders().get(Constants.TIMESTAMP));

  Long now = System.currentTimeMillis();
  event = interceptor.intercept(event);
  String timestampStr = event.getHeaders().get(Constants.TIMESTAMP);
  Assert.assertNotNull(timestampStr);
  Assert.assertTrue(Long.parseLong(timestampStr) >= now);
}
Example 6
Source File: TestStaticInterceptor.java from mt-flume with Apache License 2.0
@Test
public void testCustomKeyValue() throws ClassNotFoundException,
    InstantiationException, IllegalAccessException {
  Interceptor.Builder builder = InterceptorBuilderFactory.newInstance(
      InterceptorType.STATIC.toString());

  Context ctx = new Context();
  ctx.put(Constants.KEY, "myKey");
  ctx.put(Constants.VALUE, "myVal");

  builder.configure(ctx);
  Interceptor interceptor = builder.build();

  Event event = EventBuilder.withBody("test", Charsets.UTF_8);
  Assert.assertNull(event.getHeaders().get("myKey"));

  event = interceptor.intercept(event);
  String val = event.getHeaders().get("myKey");

  Assert.assertNotNull(val);
  Assert.assertEquals("myVal", val);
}
Example 7
Source File: TestRegexExtractorInterceptorMillisSerializer.java from mt-flume with Apache License 2.0
@Test
public void shouldReturnMillisFromPattern() {
  RegexExtractorInterceptorMillisSerializer fixture =
      new RegexExtractorInterceptorMillisSerializer();
  Context context = new Context();
  String pattern = "yyyy-MM-dd HH:mm:ss";
  context.put("pattern", pattern);
  fixture.configure(context);

  DateTimeFormatter formatter = DateTimeFormat.forPattern(pattern);
  long time = (System.currentTimeMillis() / 1000L) * 1000L;
  Assert.assertEquals(String.valueOf(time),
      fixture.serialize(formatter.print(time)));
}
Example 8
Source File: TestUUIDInterceptor.java from mt-flume with Apache License 2.0
@Test
public void testPrefix() throws Exception {
  Context context = new Context();
  context.put(UUIDInterceptor.HEADER_NAME, ID);
  context.put(UUIDInterceptor.PREFIX_NAME, "bar#");
  Event event = new SimpleEvent();
  assertTrue(build(context).intercept(event).getHeaders().get(ID).startsWith("bar#"));
}
Example 9
Source File: XmlXpathDeserializerTest.java from ingestion with Apache License 2.0
@Test
public void testXPathWithNS() throws IOException {
  Context context = new Context();
  context.put("expression", "/bookstore/book");
  EventDeserializer des = new XmlXpathDeserializer.Builder()
      .build(context, getTestInputStream("ns.xml"));
  List<Event> events = des.readEvents(4);
  assertEquals(4, events.size());
  for (final Event event : events) {
    assertNotNull(event);
  }
}
Example 10
Source File: MorphlineSolrSink.java from mt-flume with Apache License 2.0
@Override
public void configure(Context context) {
  if (context.getString(FaultTolerance.RECOVERABLE_EXCEPTION_CLASSES) == null) {
    context.put(FaultTolerance.RECOVERABLE_EXCEPTION_CLASSES,
        "org.apache.solr.client.solrj.SolrServerException");
  }
  super.configure(context);
}
Example 11
Source File: KafkaSinkUtilTest.java from flume-ng-kafka-sink with Apache License 2.0
@Before
public void setUp() throws Exception {
  Context context = new Context();
  context.put("consumer.timeout", "10");
  context.put("type", "KafkaSource");
  context.put("topic", "test");
  props = KafkaSinkUtil.getKafkaConfigProperties(context);
}
Example 12
Source File: TestPhoenixSink.java from phoenix with BSD 3-Clause "New" or "Revised" License
private Channel initChannel() {
  // Channel configuration
  Context channelContext = new Context();
  channelContext.put("capacity", "10000");
  channelContext.put("transactionCapacity", "200");

  Channel channel = new MemoryChannel();
  channel.setName("memorychannel");
  Configurables.configure(channel, channelContext);
  return channel;
}
Example 13
Source File: TestCassandraSink.java from ingestion with Apache License 2.0
@Ignore
@Test
public void processIllegalArgumentException() throws EventDeliveryException {
  final CassandraSink sink = new CassandraSink();
  final Channel channel = mock(Channel.class);
  final Transaction tx = mock(Transaction.class);
  final CassandraTable table = mock(CassandraTable.class);

  final Context ctx = new Context();
  ctx.put("tables", "keyspace.table");
  sink.configure(ctx);
  sink.tables = Collections.singletonList(table);
  sink.setChannel(channel);
  when(channel.getTransaction()).thenReturn(tx);

  final Event event = EventBuilder.withBody(new byte[0],
      ImmutableMap.of("id", "1", "col", "text"));
  when(channel.take()).thenReturn(event).thenReturn(null);
  doThrow(IllegalArgumentException.class).when(table).save(anyListOf(Event.class));

  boolean hasThrown = false;
  try {
    sink.process();
  } catch (EventDeliveryException ex) {
    hasThrown = true;
    if (!(ex.getCause() instanceof IllegalArgumentException)) {
      fail("Did not throw inner IllegalArgumentException: " + ex);
    }
  }

  verify(tx).begin();
  verify(tx).rollback();
  verify(tx).close();
  verifyNoMoreInteractions(tx);
  if (!hasThrown) {
    fail("Did not throw exception");
  }
}
Example 14
Source File: TestHDFSCompressedDataStream.java from mt-flume with Apache License 2.0
@Test
public void testGzipDurabilityWithSerializer() throws Exception {
  Context context = new Context();
  context.put("serializer", "AVRO_EVENT");

  HDFSCompressedDataStream writer = new HDFSCompressedDataStream();
  writer.configure(context);
  writer.open(fileURI, factory.getCodec(new Path(fileURI)),
      SequenceFile.CompressionType.BLOCK);

  String[] bodies = { "yarf!", "yarfing!" };
  writeBodies(writer, bodies);

  int found = 0;
  int expected = bodies.length;
  List<String> expectedBodies = Lists.newArrayList(bodies);

  GZIPInputStream cmpIn = new GZIPInputStream(new FileInputStream(file));
  DatumReader<GenericRecord> reader = new GenericDatumReader<GenericRecord>();
  DataFileStream<GenericRecord> avroStream =
      new DataFileStream<GenericRecord>(cmpIn, reader);
  GenericRecord record = new GenericData.Record(avroStream.getSchema());
  while (avroStream.hasNext()) {
    avroStream.next(record);
    CharsetDecoder decoder = Charsets.UTF_8.newDecoder();
    String bodyStr = decoder.decode((ByteBuffer) record.get("body")).toString();
    expectedBodies.remove(bodyStr);
    found++;
  }
  avroStream.close();
  cmpIn.close();

  Assert.assertTrue("Found = " + found + ", Expected = " + expected
      + ", Left = " + expectedBodies.size() + " " + expectedBodies,
      expectedBodies.size() == 0);
}
Example 15
Source File: TestHDFSEventSink.java from mt-flume with Apache License 2.0
@Test
public void testAppend() throws InterruptedException, LifecycleException,
    EventDeliveryException, IOException {
  LOG.debug("Starting...");
  final long rollCount = 3;
  final long batchSize = 2;
  final String fileName = "FlumeData";

  // clear the test directory
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(conf);
  Path dirPath = new Path(testPath);
  fs.delete(dirPath, true);
  fs.mkdirs(dirPath);

  Context context = new Context();
  context.put("hdfs.path", testPath + "/%Y-%m-%d/%H");
  context.put("hdfs.timeZone", "UTC");
  context.put("hdfs.filePrefix", fileName);
  context.put("hdfs.rollCount", String.valueOf(rollCount));
  context.put("hdfs.batchSize", String.valueOf(batchSize));

  Configurables.configure(sink, context);

  Channel channel = new MemoryChannel();
  Configurables.configure(channel, context);

  sink.setChannel(channel);
  sink.start();

  Calendar eventDate = Calendar.getInstance();
  List<String> bodies = Lists.newArrayList();

  // push the event batches into channel
  for (int i = 1; i < 4; i++) {
    Transaction txn = channel.getTransaction();
    txn.begin();
    for (int j = 1; j <= batchSize; j++) {
      Event event = new SimpleEvent();
      eventDate.clear();
      eventDate.set(2011, i, i, i, 0); // yy mm dd
      event.getHeaders().put("timestamp",
          String.valueOf(eventDate.getTimeInMillis()));
      event.getHeaders().put("hostname", "Host" + i);
      String body = "Test." + i + "." + j;
      event.setBody(body.getBytes());
      bodies.add(body);
      channel.put(event);
    }
    txn.commit();
    txn.close();

    // execute sink to process the events
    sink.process();
  }

  sink.stop();
  verifyOutputSequenceFiles(fs, conf, dirPath.toUri().getPath(), fileName, bodies);
}
Example 16
Source File: TestMemoryChannelTransaction.java from mt-flume with Apache License 2.0
@Test
public void testRollBack() throws InterruptedException, EventDeliveryException {
  Event event, event2;
  Context context = new Context();
  int putCounter = 0;

  context.put("keep-alive", "1");
  Configurables.configure(channel, context);

  Transaction transaction = channel.getTransaction();
  Assert.assertNotNull(transaction);

  // add events and rollback txn
  transaction.begin();
  for (putCounter = 0; putCounter < 10; putCounter++) {
    event = EventBuilder.withBody(("test event" + putCounter).getBytes());
    channel.put(event);
  }
  transaction.rollback();
  transaction.close();

  // verify that no events are stored due to rollback
  transaction = channel.getTransaction();
  transaction.begin();
  event2 = channel.take();
  Assert.assertNull("extra event found", event2);
  transaction.commit();
  transaction.close();

  // add events and commit
  transaction = channel.getTransaction();
  transaction.begin();
  for (putCounter = 0; putCounter < 10; putCounter++) {
    event = EventBuilder.withBody(("test event" + putCounter).getBytes());
    channel.put(event);
  }
  transaction.commit();
  transaction.close();

  transaction = channel.getTransaction();
  Assert.assertNotNull(transaction);

  // verify events are there, then rollback the take
  transaction.begin();
  for (int i = 0; i < 10; i++) {
    event2 = channel.take();
    Assert.assertNotNull("lost an event", event2);
    Assert.assertArrayEquals(event2.getBody(), ("test event" + i).getBytes());
  }
  event2 = channel.take();
  Assert.assertNull("extra event found", event2);
  transaction.rollback();
  transaction.close();

  // verify that the events were left in there due to rollback
  transaction = channel.getTransaction();
  transaction.begin();
  for (int i = 0; i < 10; i++) {
    event2 = channel.take();
    Assert.assertNotNull("lost an event", event2);
    Assert.assertArrayEquals(event2.getBody(), ("test event" + i).getBytes());
  }
  event2 = channel.take();
  Assert.assertNull("extra event found", event2);
  transaction.rollback();
  transaction.close();
}
Example 17
Source File: JDBCSinkTest.java from ingestion with Apache License 2.0
@Test
public void mappedWithDerby() throws Exception {
  FileUtils.deleteDirectory(new File("test_derby_db"));
  Class.forName("org.apache.derby.jdbc.EmbeddedDriver").newInstance();
  Connection conn = DriverManager.getConnection("jdbc:derby:test_derby_db;create=true");
  conn.prepareStatement(
      "CREATE TABLE TEST (myInteger INT, myString VARCHAR(255), myId INT NOT NULL GENERATED ALWAYS AS IDENTITY \n"
          + "\t(START WITH 1, INCREMENT BY 1), PRIMARY KEY(myId))").execute();
  conn.commit();
  conn.close();

  Context ctx = new Context();
  ctx.put("driver", "org.apache.derby.jdbc.EmbeddedDriver");
  ctx.put("connectionString", "jdbc:derby:test_derby_db");
  ctx.put("table", "test");
  ctx.put("sqlDialect", "DERBY");
  ctx.put("batchSize", "1");
  JDBCSink jdbcSink = new JDBCSink();
  Configurables.configure(jdbcSink, ctx);

  Context channelContext = new Context();
  channelContext.put("capacity", "10000");
  channelContext.put("transactionCapacity", "200");
  Channel channel = new MemoryChannel();
  channel.setName("junitChannel");
  Configurables.configure(channel, channelContext);

  jdbcSink.setChannel(channel);
  channel.start();
  jdbcSink.start();

  Transaction tx = channel.getTransaction();
  tx.begin();

  Map<String, String> headers = new HashMap<String, String>();
  headers.put("myString", "bar"); // Overwrites the value defined in JSON body
  headers.put("myInteger", "64");
  headers.put("myBoolean", "true");
  headers.put("myDouble", "1.0");
  headers.put("myNull", "foobar");
  Date myDate = new Date();
  headers.put("myDate", Long.toString(myDate.getTime()));
  headers.put("myString2", "baz");
  Event event = EventBuilder.withBody(new byte[0], headers);
  channel.put(event);

  tx.commit();
  tx.close();

  jdbcSink.process();

  jdbcSink.stop();
  channel.stop();

  conn = DriverManager.getConnection("jdbc:derby:test_derby_db");
  ResultSet resultSet = conn.prepareStatement("SELECT * FROM TEST").executeQuery();
  resultSet.next();
  assertThat(resultSet.getInt("myInteger")).isEqualTo(64);
  assertThat(resultSet.getString("myString")).isEqualTo("bar");
  conn.close();

  try {
    DriverManager.getConnection("jdbc:derby:;shutdown=true");
  } catch (SQLException ex) {
    // Derby signals a successful engine shutdown with an SQLException
  }
}
Example 18
Source File: TestMemoryChannelTransaction.java from mt-flume with Apache License 2.0
@Ignore("BasicChannelSemantics doesn't support re-entrant transactions") @Test public void testReEntTxnRollBack() throws InterruptedException, EventDeliveryException { Event event, event2; Context context = new Context(); int putCounter = 0; context.put("keep-alive", "1"); Configurables.configure(channel, context); Transaction transaction = channel.getTransaction(); Assert.assertNotNull(transaction); // add events and rollback txn transaction.begin(); for (putCounter = 0; putCounter < 10; putCounter++) { event = EventBuilder.withBody(("test event" + putCounter).getBytes()); channel.put(event); } transaction.rollback(); transaction.close(); // verify that no events are stored due to rollback transaction = channel.getTransaction(); transaction.begin(); event2 = channel.take(); Assert.assertNull("extra event found", event2); transaction.commit(); transaction.close(); // add events and commit transaction = channel.getTransaction(); transaction.begin(); for (putCounter = 0; putCounter < 10; putCounter++) { event = EventBuilder.withBody(("test event" + putCounter).getBytes()); channel.put(event); } transaction.commit(); transaction.close(); transaction = channel.getTransaction(); Assert.assertNotNull(transaction); // verify events are there, then rollback the take transaction.begin(); for (int i = 0; i < 10; i++) { transaction.begin(); // inner begin event2 = channel.take(); Assert.assertNotNull("lost an event", event2); Assert.assertArrayEquals(event2.getBody(), ("test event" + i).getBytes()); transaction.commit(); // inner commit } event2 = channel.take(); Assert.assertNull("extra event found", event2); transaction.rollback(); transaction.close(); // verify that the events were left in there due to rollback transaction = channel.getTransaction(); transaction.begin(); for (int i = 0; i < 10; i++) { event2 = channel.take(); Assert.assertNotNull("lost an event", event2); Assert.assertArrayEquals(event2.getBody(), ("test event" + i).getBytes()); } event2 = channel.take(); Assert.assertNull("extra event found", event2); transaction.rollback(); transaction.close(); }
Example 19
Source File: TestHDFSEventSink.java from mt-flume with Apache License 2.0
@Test
public void testSimpleAppend() throws InterruptedException, LifecycleException,
    EventDeliveryException, IOException {
  LOG.debug("Starting...");
  final String fileName = "FlumeData";
  final long rollCount = 5;
  final long batchSize = 2;
  final int numBatches = 4;
  String newPath = testPath + "/singleBucket";
  int totalEvents = 0;
  int i = 1, j = 1;

  // clear the test directory
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(conf);
  Path dirPath = new Path(newPath);
  fs.delete(dirPath, true);
  fs.mkdirs(dirPath);

  Context context = new Context();
  context.put("hdfs.path", newPath);
  context.put("hdfs.filePrefix", fileName);
  context.put("hdfs.rollCount", String.valueOf(rollCount));
  context.put("hdfs.batchSize", String.valueOf(batchSize));

  Configurables.configure(sink, context);

  Channel channel = new MemoryChannel();
  Configurables.configure(channel, context);

  sink.setChannel(channel);
  sink.start();

  Calendar eventDate = Calendar.getInstance();
  List<String> bodies = Lists.newArrayList();

  // push the event batches into channel
  for (i = 1; i < numBatches; i++) {
    Transaction txn = channel.getTransaction();
    txn.begin();
    for (j = 1; j <= batchSize; j++) {
      Event event = new SimpleEvent();
      eventDate.clear();
      eventDate.set(2011, i, i, i, 0); // yy mm dd
      event.getHeaders().put("timestamp",
          String.valueOf(eventDate.getTimeInMillis()));
      event.getHeaders().put("hostname", "Host" + i);
      String body = "Test." + i + "." + j;
      event.setBody(body.getBytes());
      bodies.add(body);
      channel.put(event);
      totalEvents++;
    }
    txn.commit();
    txn.close();

    // execute sink to process the events
    sink.process();
  }

  sink.stop();

  // loop through all the files generated and check their contents
  FileStatus[] dirStat = fs.listStatus(dirPath);
  Path[] fList = FileUtil.stat2Paths(dirStat);

  // check that the roll happened correctly for the given data
  long expectedFiles = totalEvents / rollCount;
  if (totalEvents % rollCount > 0) {
    expectedFiles++;
  }
  Assert.assertEquals("num files wrong, found: " + Lists.newArrayList(fList),
      expectedFiles, fList.length);
  verifyOutputSequenceFiles(fs, conf, dirPath.toUri().getPath(), fileName, bodies);
}
Example 20
Source File: NGSIGroupingInterceptorTest.java from fiware-cygnus with GNU Affero General Public License v3.0
private Context createBuilderContext(String enableEncoding, String groupingRulesConfFile) {
  Context context = new Context();
  context.put("enable_encoding", enableEncoding);
  context.put("grouping_rules_conf_file", groupingRulesConfFile);
  return context;
}