kafka.utils.MockTime Java Examples
The following examples show how to use kafka.utils.MockTime.
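Most of the examples below follow the same pattern: start an embedded ZooKeeper, build a single-broker config, and pass a kafka.utils.MockTime instance to TestUtils.createServer so the test broker runs on a controllable clock. The following is a minimal sketch of that pattern, not taken from any single example; it assumes the old Scala test classpath (kafka.utils.MockTime, kafka.utils.TestUtils, kafka.zk.EmbeddedZookeeper), and exact packages and TestUtils signatures vary between Kafka versions. BROKER_HOST and BROKER_PORT are placeholder constants, not part of any example.

import java.nio.file.Files;
import java.util.Properties;

import kafka.server.KafkaConfig;
import kafka.server.KafkaServer;
import kafka.utils.MockTime;
import kafka.utils.TestUtils;
import kafka.utils.Time;
import kafka.zk.EmbeddedZookeeper;

public class EmbeddedKafkaSketch {
  private static final String BROKER_HOST = "127.0.0.1"; // placeholder
  private static final String BROKER_PORT = "9092";      // placeholder

  static KafkaServer startBroker() throws Exception {
    // Embedded ZooKeeper for the broker to register with.
    EmbeddedZookeeper zkServer = new EmbeddedZookeeper();

    // Minimal single-broker config, logging to a throwaway temp directory.
    Properties brokerProps = new Properties();
    brokerProps.setProperty("zookeeper.connect", "127.0.0.1:" + zkServer.port());
    brokerProps.setProperty("broker.id", "0");
    brokerProps.setProperty("log.dirs", Files.createTempDirectory("kafka-").toAbsolutePath().toString());
    brokerProps.setProperty("listeners", "PLAINTEXT://" + BROKER_HOST + ":" + BROKER_PORT);

    // MockTime gives the broker a controllable clock instead of wall-clock time.
    KafkaConfig config = new KafkaConfig(brokerProps);
    Time mock = new MockTime();
    return TestUtils.createServer(config, mock);
  }
}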
Example #1
Source File: KafkaClusterTestBase.java From incubator-gobblin with Apache License 2.0 | 6 votes |
private KafkaServer createKafkaServer(int brokerId, String _zkConnectString) {
  int _brokerId = brokerId;
  int _kafkaServerPort = TestUtils.findFreePort();
  // createBrokerConfig is a Scala method with default parameters; the
  // createBrokerConfig$default$N() calls supply those defaults explicitly from Java.
  Properties props = kafka.utils.TestUtils.createBrokerConfig(
      _brokerId,
      _zkConnectString,
      kafka.utils.TestUtils.createBrokerConfig$default$3(),
      kafka.utils.TestUtils.createBrokerConfig$default$4(),
      _kafkaServerPort,
      kafka.utils.TestUtils.createBrokerConfig$default$6(),
      kafka.utils.TestUtils.createBrokerConfig$default$7(),
      kafka.utils.TestUtils.createBrokerConfig$default$8(),
      kafka.utils.TestUtils.createBrokerConfig$default$9(),
      kafka.utils.TestUtils.createBrokerConfig$default$10(),
      kafka.utils.TestUtils.createBrokerConfig$default$11(),
      kafka.utils.TestUtils.createBrokerConfig$default$12(),
      kafka.utils.TestUtils.createBrokerConfig$default$13(),
      kafka.utils.TestUtils.createBrokerConfig$default$14()
  );

  KafkaConfig config = new KafkaConfig(props);
  Time mock = new MockTime();
  KafkaServer _kafkaServer = kafka.utils.TestUtils.createServer(config, mock);
  kafkaBrokerPortList.add(_kafkaServerPort);
  return _kafkaServer;
}
Example #2
Source File: KafkaTestBase.java From incubator-gobblin with Apache License 2.0 | 6 votes |
void start() throws RuntimeException {
  if (_numStarted.incrementAndGet() == 1) {
    log.warn("Starting up Kafka server suite. Zk at " + _zkConnectString + "; Kafka server at " + _kafkaServerPort);
    _zkServer = new EmbeddedZookeeper(_zkConnectString);
    _zkClient = new ZkClient(_zkConnectString, 30000, 30000, ZKStringSerializer$.MODULE$);

    Properties props = kafka.utils.TestUtils.createBrokerConfig(_brokerId, _kafkaServerPort, true);
    props.setProperty("zookeeper.connect", _zkConnectString);
    KafkaConfig config = new KafkaConfig(props);
    Time mock = new MockTime();
    _kafkaServer = kafka.utils.TestUtils.createServer(config, mock);
  } else {
    log.info("Kafka server suite already started... continuing");
  }
}
Example #3
Source File: KafkaTestBase.java From incubator-gobblin with Apache License 2.0 | 6 votes |
public static void startServer() throws RuntimeException {
  if (serverStarted && serverClosed) {
    throw new RuntimeException("Kafka test server has already been closed. Cannot generate Kafka server twice.");
  }
  if (!serverStarted) {
    serverStarted = true;
    // start an embedded ZooKeeper and a single Kafka broker backed by MockTime
    zkConnect = TestZKUtils.zookeeperConnect();
    zkServer = new EmbeddedZookeeper(zkConnect);
    zkClient = new ZkClient(zkServer.connectString(), 30000, 30000, ZKStringSerializer$.MODULE$);

    kafkaPort = TestUtils.choosePort();
    Properties props = TestUtils.createBrokerConfig(brokerId, kafkaPort, true);
    KafkaConfig config = new KafkaConfig(props);
    Time mock = new MockTime();
    kafkaServer = TestUtils.createServer(config, mock);
  }
}
Example #4
Source File: KafkaExportITBase.java From rya with Apache License 2.0 | 6 votes |
/**
 * setup mini kafka and call the super to setup mini fluo
 */
@Before
public void setupKafka() throws Exception {
  // Install an instance of Rya on the Accumulo cluster.
  installRyaInstance();

  // Setup Kafka.
  zkServer = new EmbeddedZookeeper();
  final String zkConnect = ZKHOST + ":" + zkServer.port();
  zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$);
  zkUtils = ZkUtils.apply(zkClient, false);

  // setup Broker
  final Properties brokerProps = new Properties();
  brokerProps.setProperty("zookeeper.connect", zkConnect);
  brokerProps.setProperty("broker.id", "0");
  brokerProps.setProperty("log.dirs", Files.createTempDirectory("kafka-").toAbsolutePath().toString());
  brokerProps.setProperty("listeners", "PLAINTEXT://" + BROKERHOST + ":" + BROKERPORT);
  final KafkaConfig config = new KafkaConfig(brokerProps);
  final Time mock = new MockTime();
  kafkaServer = TestUtils.createServer(config, mock);
}
Example #5
Source File: KafkaDestinationProcessorTest.java From incubator-samoa with Apache License 2.0 | 6 votes |
@BeforeClass
public static void setUpClass() throws IOException {
  // setup Zookeeper
  zkServer = new EmbeddedZookeeper();
  zkConnect = ZKHOST + ":" + zkServer.port();
  zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$);
  ZkUtils zkUtils = ZkUtils.apply(zkClient, false);

  // setup Broker
  Properties brokerProps = new Properties();
  brokerProps.setProperty("zookeeper.connect", zkConnect);
  brokerProps.setProperty("broker.id", "0");
  brokerProps.setProperty("log.dirs", Files.createTempDirectory("kafka-").toAbsolutePath().toString());
  brokerProps.setProperty("listeners", "PLAINTEXT://" + BROKERHOST + ":" + BROKERPORT);
  KafkaConfig config = new KafkaConfig(brokerProps);
  Time mock = new MockTime();
  kafkaServer = TestUtils.createServer(config, mock);

  // create topic
  AdminUtils.createTopic(zkUtils, TOPIC, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);
}
Example #6
Source File: KafkaUtilsTest.java From incubator-samoa with Apache License 2.0 | 6 votes |
@BeforeClass
public static void setUpClass() throws IOException {
  // setup Zookeeper
  zkServer = new EmbeddedZookeeper();
  zkConnect = ZKHOST + ":" + zkServer.port();
  zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$);
  ZkUtils zkUtils = ZkUtils.apply(zkClient, false);

  // setup Broker
  Properties brokerProps = new Properties();
  brokerProps.setProperty("zookeeper.connect", zkConnect);
  brokerProps.setProperty("broker.id", "0");
  brokerProps.setProperty("log.dirs", Files.createTempDirectory("kafkaUtils-").toAbsolutePath().toString());
  brokerProps.setProperty("listeners", "PLAINTEXT://" + BROKERHOST + ":" + BROKERPORT);
  KafkaConfig config = new KafkaConfig(brokerProps);
  Time mock = new MockTime();
  kafkaServer = TestUtils.createServer(config, mock);

  // create topics
  AdminUtils.createTopic(zkUtils, TOPIC_R, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);
  AdminUtils.createTopic(zkUtils, TOPIC_S, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);
}
Example #7
Source File: KafkaEntranceProcessorTest.java From incubator-samoa with Apache License 2.0 | 6 votes |
@BeforeClass
public static void setUpClass() throws IOException {
  // setup Zookeeper
  zkServer = new EmbeddedZookeeper();
  zkConnect = ZKHOST + ":" + zkServer.port();
  zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$);
  ZkUtils zkUtils = ZkUtils.apply(zkClient, false);

  // setup Broker
  Properties brokerProps = new Properties();
  brokerProps.setProperty("zookeeper.connect", zkConnect);
  brokerProps.setProperty("broker.id", "0");
  brokerProps.setProperty("log.dirs", Files.createTempDirectory("kafka-").toAbsolutePath().toString());
  brokerProps.setProperty("listeners", "PLAINTEXT://" + BROKERHOST + ":" + BROKERPORT);
  KafkaConfig config = new KafkaConfig(brokerProps);
  Time mock = new MockTime();
  kafkaServer = TestUtils.createServer(config, mock);

  // create topics
  AdminUtils.createTopic(zkUtils, TOPIC_OOS, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);
}
Example #8
Source File: EmbeddedKafkaInstance.java From rya with Apache License 2.0 | 6 votes |
/**
 * Starts the Embedded Kafka and Zookeeper Servers.
 * @throws Exception - If an exception occurs during startup.
 */
protected void startup() throws Exception {
  // Setup the embedded zookeeper
  logger.info("Starting up Embedded Zookeeper...");
  zkServer = new EmbeddedZookeeper();
  zookeperConnect = ZKHOST + ":" + zkServer.port();
  logger.info("Embedded Zookeeper started at: {}", zookeperConnect);

  // setup Broker
  logger.info("Starting up Embedded Kafka...");
  brokerPort = Integer.toString(PortUtils.getRandomFreePort());
  final Properties brokerProps = new Properties();
  brokerProps.setProperty(KafkaConfig$.MODULE$.BrokerIdProp(), "0");
  brokerProps.setProperty(KafkaConfig$.MODULE$.HostNameProp(), BROKERHOST);
  brokerProps.setProperty(KafkaConfig$.MODULE$.PortProp(), brokerPort);
  brokerProps.setProperty(KafkaConfig$.MODULE$.ZkConnectProp(), zookeperConnect);
  brokerProps.setProperty(KafkaConfig$.MODULE$.LogDirsProp(), Files.createTempDirectory(getClass().getSimpleName() + "-").toAbsolutePath().toString());
  brokerProps.setProperty(KafkaConfig$.MODULE$.DeleteTopicEnableProp(), "true");
  final KafkaConfig config = new KafkaConfig(brokerProps);
  final Time mock = new MockTime();
  kafkaServer = TestUtils.createServer(config, mock);
  logger.info("Embedded Kafka Server started at: {}:{}", BROKERHOST, brokerPort);
}
Example #9
Source File: UserTaskManagerTest.java From cruise-control with BSD 2-Clause "Simplified" License | 5 votes |
@Test
public void testExpireSession() throws Exception {
  UUID testUserTaskId = UUID.randomUUID();

  UserTaskManager.UUIDGenerator mockUUIDGenerator = EasyMock.mock(UserTaskManager.UUIDGenerator.class);
  EasyMock.expect(mockUUIDGenerator.randomUUID()).andReturn(testUserTaskId).anyTimes();

  Time mockTime = new MockTime();
  HttpSession mockHttpSession = EasyMock.mock(HttpSession.class);
  EasyMock.expect(mockHttpSession.getLastAccessedTime()).andReturn(mockTime.milliseconds()).anyTimes();
  mockHttpSession.invalidate();

  HttpServletRequest mockHttpServletRequest = prepareRequest(mockHttpSession, null);

  OperationFuture future = new OperationFuture("future");
  UserTaskManager userTaskManager =
      new UserTaskManager(1000, 1, TimeUnit.HOURS.toMillis(6), 100, mockTime, mockUUIDGenerator);

  HttpServletResponse mockHttpServletResponse = EasyMock.mock(HttpServletResponse.class);
  mockHttpServletResponse.setHeader(EasyMock.anyString(), EasyMock.anyString());

  EasyMock.replay(mockUUIDGenerator, mockHttpSession, mockHttpServletResponse);
  // test-case: test if the sessions are removed on expiration
  OperationFuture future1 =
      userTaskManager.getOrCreateUserTask(mockHttpServletRequest, mockHttpServletResponse, uuid -> future, 0, true, null).get(0);
  Assert.assertEquals(future, future1);

  mockTime.sleep(1001);
  Thread.sleep(TimeUnit.SECONDS.toMillis(UserTaskManager.USER_TASK_SCANNER_PERIOD_SECONDS + 1));

  OperationFuture future2 = userTaskManager.getFuture(mockHttpServletRequest);
  Assert.assertNull(future2);

  userTaskManager.close();
}
Example #10
Source File: KafkaComponent.java From metron with Apache License 2.0 | 5 votes |
@Override
public void start() {
  // setup Zookeeper
  zookeeperConnectString = topologyProperties.getProperty(ZKServerComponent.ZOOKEEPER_PROPERTY);
  zkClient = new ZkClient(zookeeperConnectString, ZK_SESSION_TIMEOUT_MS, ZK_CONNECTION_TIMEOUT_MS, ZKStringSerializer$.MODULE$);

  // setup Broker
  Properties props = TestUtilsWrapper.createBrokerConfig(0, zookeeperConnectString, brokerPort);
  props.setProperty("zookeeper.connection.timeout.ms", Integer.toString(KAFKA_ZOOKEEPER_TIMEOUT_MS));
  KafkaConfig config = new KafkaConfig(props);
  Time mock = new MockTime();
  kafkaServer = TestUtils.createServer(config, mock);

  org.apache.log4j.Level oldLevel = UnitTestHelper.getLog4jLevel(KafkaServer.class);
  UnitTestHelper.setLog4jLevel(KafkaServer.class, org.apache.log4j.Level.OFF);

  // do not proceed until the broker is up
  TestUtilsWrapper.waitUntilBrokerIsRunning(kafkaServer, "Timed out waiting for RunningAsBroker State", 100000);

  for (Topic topic : getTopics()) {
    try {
      createTopic(topic.name, topic.numPartitions, KAFKA_PROPAGATE_TIMEOUT_MS);
    } catch (InterruptedException e) {
      throw new RuntimeException("Unable to create topic", e);
    }
  }
  UnitTestHelper.setLog4jLevel(KafkaServer.class, oldLevel);

  if (postStartCallback != null) {
    postStartCallback.apply(this);
  }
}
Example #11
Source File: EmbeddedKafka.java From attic-apex-malhar with Apache License 2.0 | 5 votes |
public void start() throws IOException {
  // Find port
  try {
    ServerSocket serverSocket = new ServerSocket(0);
    BROKERPORT = Integer.toString(serverSocket.getLocalPort());
    serverSocket.close();
  } catch (IOException e) {
    throw Throwables.propagate(e);
  }

  // Setup Zookeeper
  zkServer = new EmbeddedZookeeper();
  String zkConnect = BROKERHOST + ":" + zkServer.port();
  zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$);
  zkUtils = ZkUtils.apply(zkClient, false);

  // Setup brokers
  cleanupDir();
  Properties props = new Properties();
  props.setProperty("zookeeper.connect", zkConnect);
  props.setProperty("broker.id", "0");
  props.setProperty("log.dirs", KAFKA_PATH);
  props.setProperty("listeners", "PLAINTEXT://" + BROKERHOST + ":" + BROKERPORT);
  KafkaConfig config = new KafkaConfig(props);
  Time mock = new MockTime();
  kafkaServer = TestUtils.createServer(config, mock);
}
Example #12
Source File: EmbeddedKafkaCluster.java From common-docker with Apache License 2.0 | 5 votes |
private void startBroker(int brokerId, String zkConnectString) throws IOException {
  if (brokerId < 0) {
    throw new IllegalArgumentException("broker id must not be negative");
  }

  Properties props = TestUtils
      .createBrokerConfig(
          brokerId,
          zkConnectString,
          ENABLE_CONTROLLED_SHUTDOWN,
          ENABLE_DELETE_TOPIC,
          0,
          INTER_BROKER_SECURITY_PROTOCOL,
          this.brokerTrustStoreFile,
          this.brokerSaslProperties,
          ENABLE_PLAINTEXT,
          ENABLE_SASL_PLAINTEXT,
          SASL_PLAINTEXT_PORT,
          ENABLE_SSL,
          SSL_PORT,
          this.enableSASLSSL,
          0,
          Option.<String>empty(),
          1,
          false,
          NUM_PARTITIONS,
          DEFAULT_REPLICATION_FACTOR
      );

  KafkaServer broker = TestUtils.createServer(KafkaConfig.fromProps(props), new MockTime());
  brokersById.put(brokerId, broker);
}
Example #13
Source File: UserTaskManagerTest.java From cruise-control with BSD 2-Clause "Simplified" License | 5 votes |
@Test
public void testMaximumActiveTasks() {
  HttpSession mockHttpSession1 = EasyMock.mock(HttpSession.class);
  EasyMock.expect(mockHttpSession1.getLastAccessedTime()).andReturn(100L).anyTimes();

  HttpServletRequest mockHttpServletRequest1 = prepareRequest(mockHttpSession1, null);

  OperationFuture future = new OperationFuture("future");
  UserTaskManager userTaskManager =
      new UserTaskManager(1000, 1, TimeUnit.HOURS.toMillis(6), 100, new MockTime());

  HttpServletResponse mockHttpServletResponse = EasyMock.mock(HttpServletResponse.class);
  mockHttpServletResponse.setHeader(EasyMock.anyString(), EasyMock.anyString());

  EasyMock.replay(mockHttpSession1, mockHttpServletResponse);
  // test-case: test max limitation active tasks
  OperationFuture future1 =
      userTaskManager.getOrCreateUserTask(mockHttpServletRequest1, mockHttpServletResponse, uuid -> future, 0, true, null).get(0);
  Assert.assertEquals(future, future1);

  HttpSession mockHttpSession2 = EasyMock.mock(HttpSession.class);
  EasyMock.expect(mockHttpSession2.getLastAccessedTime()).andReturn(100L).anyTimes();
  EasyMock.replay(mockHttpSession2);
  EasyMock.reset(mockHttpServletResponse);

  HttpServletRequest mockHttpServletRequest2 = prepareRequest(mockHttpSession2, null, "/test2", Collections.emptyMap());
  try {
    OperationFuture future2 =
        userTaskManager.getOrCreateUserTask(mockHttpServletRequest2, mockHttpServletResponse, uuid -> future, 0, true, null).get(0);
    Assert.assertEquals(future, future2);
  } catch (RuntimeException e) {
    userTaskManager.close();
    return;
  }
  Assert.fail("Don't expect to be here!");
}
Example #14
Source File: UserTaskManagerTest.java From cruise-control with BSD 2-Clause "Simplified" License | 5 votes |
@Test
public void testCompletedTasks() throws Exception {
  HttpSession mockHttpSession = EasyMock.mock(HttpSession.class);
  EasyMock.expect(mockHttpSession.getLastAccessedTime()).andReturn(100L).anyTimes();
  mockHttpSession.invalidate();

  HttpServletRequest mockHttpServletRequest = prepareRequest(mockHttpSession, null);
  UserTaskManager.UUIDGenerator mockUUIDGenerator = EasyMock.mock(UserTaskManager.UUIDGenerator.class);
  EasyMock.expect(mockUUIDGenerator.randomUUID()).andReturn(UUID.randomUUID()).anyTimes();

  OperationFuture future = new OperationFuture("future");
  UserTaskManager userTaskManager =
      new UserTaskManager(1000, 1, TimeUnit.HOURS.toMillis(6), 100, new MockTime(), mockUUIDGenerator);

  HttpServletResponse mockHttpServletResponse = EasyMock.mock(HttpServletResponse.class);
  Capture<String> userTaskHeader = Capture.newInstance();
  Capture<String> userTaskHeaderValue = Capture.newInstance();
  mockHttpServletResponse.setHeader(EasyMock.capture(userTaskHeader), EasyMock.capture(userTaskHeaderValue));

  EasyMock.replay(mockUUIDGenerator, mockHttpSession, mockHttpServletResponse);
  // test-case: verify if the background cleaner task removes tasks that are completed
  OperationFuture future1 =
      userTaskManager.getOrCreateUserTask(mockHttpServletRequest, mockHttpServletResponse, uuid -> future, 0, true, null).get(0);
  Assert.assertEquals(future, future1);

  future1.cancel(true);
  Thread.sleep(TimeUnit.SECONDS.toMillis(UserTaskManager.USER_TASK_SCANNER_PERIOD_SECONDS * 4));

  Assert.assertTrue(future.isDone());
  Assert.assertTrue(future.isCancelled());

  userTaskManager.close();
}
Example #15
Source File: UserTaskManagerTest.java From cruise-control with BSD 2-Clause "Simplified" License | 5 votes |
@Test
public void testAddStepsFutures() {
  UUID testUserTaskId = UUID.randomUUID();

  UserTaskManager.UUIDGenerator mockUUIDGenerator = EasyMock.mock(UserTaskManager.UUIDGenerator.class);
  EasyMock.expect(mockUUIDGenerator.randomUUID()).andReturn(testUserTaskId).anyTimes();

  HttpSession mockHttpSession = EasyMock.mock(HttpSession.class);
  // Change mock session's last access time to always return current time to avoid unintended recycling of session.
  EasyMock.expect(mockHttpSession.getLastAccessedTime()).andReturn(System.currentTimeMillis()).anyTimes();

  HttpServletRequest mockHttpServletRequest = prepareRequest(mockHttpSession, null);
  HttpServletResponse mockHttpServletResponse = EasyMock.mock(HttpServletResponse.class);
  mockHttpServletResponse.setHeader(EasyMock.anyString(), EasyMock.anyString());
  EasyMock.replay(mockUUIDGenerator, mockHttpSession, mockHttpServletResponse);

  UserTaskManager userTaskManager =
      new UserTaskManager(1000, 1, TimeUnit.HOURS.toMillis(6), 100, new MockTime(), mockUUIDGenerator);

  OperationFuture testFuture1 = new OperationFuture("testFuture1");
  OperationFuture testFuture2 = new OperationFuture("testFuture2");

  OperationFuture insertedFuture1 =
      userTaskManager.getOrCreateUserTask(mockHttpServletRequest, mockHttpServletResponse, uuid -> testFuture1, 0, true, null).get(0);
  Assert.assertEquals(testFuture1, insertedFuture1);
  EasyMock.reset(mockHttpServletResponse);

  OperationFuture insertedFuture2 =
      userTaskManager.getOrCreateUserTask(mockHttpServletRequest, mockHttpServletResponse, uuid -> testFuture2, 1, true, null).get(1);
  Assert.assertEquals(testFuture2, insertedFuture2);

  Assert.assertEquals(userTaskManager.getUserTaskByUserTaskId(testUserTaskId, mockHttpServletRequest).futures().size(), 2);

  userTaskManager.close();
}
Example #16
Source File: KafkaTestBase.java From incubator-gobblin with Apache License 2.0 | 5 votes |
void start() throws RuntimeException {
  if (_numStarted.incrementAndGet() == 1) {
    log.warn("Starting up Kafka server suite. Zk at " + _zkConnectString + "; Kafka server at " + _kafkaServerPort);
    _zkServer = new EmbeddedZookeeper();
    _zkConnectString = "127.0.0.1:" + _zkServer.port();
    _zkClient = new ZkClient(_zkConnectString, 30000, 30000, ZKStringSerializer$.MODULE$);

    // createBrokerConfig is a Scala method with default parameters; the
    // createBrokerConfig$default$N() calls supply those defaults explicitly from Java.
    Properties props = kafka.utils.TestUtils.createBrokerConfig(
        _brokerId,
        _zkConnectString,
        kafka.utils.TestUtils.createBrokerConfig$default$3(),
        kafka.utils.TestUtils.createBrokerConfig$default$4(),
        _kafkaServerPort,
        kafka.utils.TestUtils.createBrokerConfig$default$6(),
        kafka.utils.TestUtils.createBrokerConfig$default$7(),
        kafka.utils.TestUtils.createBrokerConfig$default$8(),
        kafka.utils.TestUtils.createBrokerConfig$default$9(),
        kafka.utils.TestUtils.createBrokerConfig$default$10(),
        kafka.utils.TestUtils.createBrokerConfig$default$11(),
        kafka.utils.TestUtils.createBrokerConfig$default$12(),
        kafka.utils.TestUtils.createBrokerConfig$default$13(),
        kafka.utils.TestUtils.createBrokerConfig$default$14()
    );

    KafkaConfig config = new KafkaConfig(props);
    Time mock = new MockTime();
    _kafkaServer = kafka.utils.TestUtils.createServer(config, mock);
  } else {
    log.info("Kafka server suite already started... continuing");
  }
}
Example #17
Source File: ITZipkinReceiver.java From incubator-retired-htrace with Apache License 2.0 | 4 votes |
@Test
public void testKafkaTransport() throws Exception {
  String topic = "zipkin";

  // Kafka setup
  EmbeddedZookeeper zkServer = new EmbeddedZookeeper(TestZKUtils.zookeeperConnect());
  ZkClient zkClient = new ZkClient(zkServer.connectString(), 30000, 30000, ZKStringSerializer$.MODULE$);
  Properties props = TestUtils.createBrokerConfig(0, TestUtils.choosePort(), false);
  KafkaConfig config = new KafkaConfig(props);
  KafkaServer kafkaServer = TestUtils.createServer(config, new MockTime());

  Buffer<KafkaServer> servers = JavaConversions.asScalaBuffer(Collections.singletonList(kafkaServer));
  TestUtils.createTopic(zkClient, topic, 1, 1, servers, new Properties());
  zkClient.close();
  TestUtils.waitUntilMetadataIsPropagated(servers, topic, 0, 5000);

  // HTrace
  HTraceConfiguration hTraceConfiguration = HTraceConfiguration.fromKeyValuePairs(
      "sampler.classes", "AlwaysSampler",
      "span.receiver.classes", ZipkinSpanReceiver.class.getName(),
      "zipkin.kafka.metadata.broker.list", config.advertisedHostName() + ":" + config.advertisedPort(),
      "zipkin.kafka.topic", topic,
      ZipkinSpanReceiver.TRANSPORT_CLASS_KEY, KafkaTransport.class.getName()
  );

  final Tracer tracer = new Tracer.Builder("test-tracer")
      .tracerPool(new TracerPool("test-tracer-pool"))
      .conf(hTraceConfiguration)
      .build();

  String scopeName = "test-kafka-transport-scope";
  TraceScope traceScope = tracer.newScope(scopeName);
  traceScope.close();
  tracer.close();

  // Kafka consumer
  Properties consumerProps = new Properties();
  consumerProps.put("zookeeper.connect", props.getProperty("zookeeper.connect"));
  consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, "testing.group");
  consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "smallest");
  ConsumerConnector connector =
      kafka.consumer.Consumer.createJavaConsumerConnector(new kafka.consumer.ConsumerConfig(consumerProps));
  Map<String, Integer> topicCountMap = new HashMap<>();
  topicCountMap.put(topic, 1);
  Map<String, List<KafkaStream<byte[], byte[]>>> streams = connector.createMessageStreams(topicCountMap);
  ConsumerIterator<byte[], byte[]> it = streams.get(topic).get(0).iterator();

  // Test
  Assert.assertTrue("We should have one message in Kafka", it.hasNext());
  Span span = new Span();
  new TDeserializer(new TBinaryProtocol.Factory()).deserialize(span, it.next().message());
  Assert.assertEquals("The span name should match our scope description", span.getName(), scopeName);

  kafkaServer.shutdown();
}
Example #18
Source File: EmbeddedKafka.java From mongo-kafka with Apache License 2.0 | 4 votes |
/** Creates and starts the cluster. */
public void start() throws Exception {
  LOGGER.debug("Initiating embedded Kafka cluster startup");
  LOGGER.debug("Starting a ZooKeeper instance...");
  zookeeper = new ZooKeeperEmbedded();
  LOGGER.debug("ZooKeeper instance is running at {}", zookeeper.connectString());

  zkClient =
      KafkaZkClient.apply(
          zookeeper.connectString(),
          JaasUtils.isZkSecurityEnabled(),
          30000,
          30000,
          1000,
          new MockTime(),
          "kafka.server",
          "SessionExpireListener");

  final Properties effectiveBrokerConfig = effectiveBrokerConfigFrom(brokerConfig, zookeeper);
  LOGGER.debug(
      "Starting a Kafka instance on port {} ...",
      effectiveBrokerConfig.getProperty(KafkaConfig$.MODULE$.PortProp()));
  broker = new KafkaEmbedded(effectiveBrokerConfig, new MockTime());
  LOGGER.debug(
      "Kafka instance is running at {}, connected to ZooKeeper at {}",
      broker.brokerList(),
      broker.zookeeperConnect());

  final Properties schemaRegistryProps = new Properties();
  schemaRegistryProps.put(SchemaRegistryConfig.KAFKASTORE_TIMEOUT_CONFIG, KAFKASTORE_OPERATION_TIMEOUT_MS);
  schemaRegistryProps.put(SchemaRegistryConfig.DEBUG_CONFIG, KAFKASTORE_DEBUG);
  schemaRegistryProps.put(SchemaRegistryConfig.KAFKASTORE_INIT_TIMEOUT_CONFIG, KAFKASTORE_INIT_TIMEOUT);

  schemaRegistry =
      new RestApp(0, zookeeperConnect(), KAFKA_SCHEMAS_TOPIC, AVRO_COMPATIBILITY_TYPE, schemaRegistryProps);
  schemaRegistry.start();

  LOGGER.debug("Starting a Connect standalone instance...");
  connect = new ConnectStandalone(connectWorkerConfig());
  connect.start();
  LOGGER.debug("Connect standalone instance is running at {}", connect.getConnectionString());

  running = true;
}
Example #19
Source File: UserTaskManagerTest.java From cruise-control with BSD 2-Clause "Simplified" License | 4 votes |
@Test
public void testSessionsShareUserTask() {
  UUID testUserTaskId = UUID.randomUUID();

  UserTaskManager.UUIDGenerator mockUUIDGenerator = EasyMock.mock(UserTaskManager.UUIDGenerator.class);
  EasyMock.expect(mockUUIDGenerator.randomUUID()).andReturn(testUserTaskId).anyTimes();

  HttpSession mockHttpSession = EasyMock.mock(HttpSession.class);
  EasyMock.expect(mockHttpSession.getLastAccessedTime()).andReturn(100L).anyTimes();

  Map<String, String[]> requestParams1 = new HashMap<>();
  requestParams1.put("param", new String[]{"true"});
  HttpServletRequest mockHttpServletRequest1 = prepareRequest(mockHttpSession, null, "test", requestParams1);
  HttpServletResponse mockHttpServletResponse1 = EasyMock.mock(HttpServletResponse.class);
  Capture<String> userTaskHeader = Capture.newInstance();
  Capture<String> userTaskHeaderValue = Capture.newInstance();
  mockHttpServletResponse1.setHeader(EasyMock.capture(userTaskHeader), EasyMock.capture(userTaskHeaderValue));

  Map<String, String[]> requestParams2 = new HashMap<>();
  requestParams2.put("param", new String[]{"true"});
  HttpServletRequest mockHttpServletRequest2 = prepareRequest(mockHttpSession, null, "test", requestParams2);
  HttpServletResponse mockHttpServletResponse2 = EasyMock.mock(HttpServletResponse.class);
  mockHttpServletResponse2.setHeader(EasyMock.capture(userTaskHeader), EasyMock.capture(userTaskHeaderValue));

  Map<String, String[]> requestParams3 = new HashMap<>();
  requestParams3.put("param", new String[]{"true"});
  HttpServletRequest mockHttpServletRequest3 = prepareRequest(mockHttpSession, testUserTaskId.toString(), "test", requestParams3);
  HttpServletResponse mockHttpServletResponse3 = EasyMock.mock(HttpServletResponse.class);
  mockHttpServletResponse3.setHeader(EasyMock.capture(userTaskHeader), EasyMock.capture(userTaskHeaderValue));

  EasyMock.replay(mockUUIDGenerator, mockHttpSession, mockHttpServletResponse1, mockHttpServletResponse2, mockHttpServletResponse3);

  OperationFuture future = new OperationFuture("future");
  UserTaskManager userTaskManager =
      new UserTaskManager(1000, 5, TimeUnit.HOURS.toMillis(6), 100, new MockTime(), mockUUIDGenerator);

  userTaskManager.getOrCreateUserTask(mockHttpServletRequest1, mockHttpServletResponse1, uuid -> future, 0, true, null);
  userTaskManager.getOrCreateUserTask(mockHttpServletRequest2, mockHttpServletResponse2, uuid -> future, 0, true, null);
  // Test UserTaskManger can recognize the previous created task by taskId.
  userTaskManager.getOrCreateUserTask(mockHttpServletRequest3, mockHttpServletResponse3, uuid -> future, 0, true, null);

  // The 2nd request should reuse the UserTask created for the 1st request since they use the same session and send the same request.
  Assert.assertEquals(1, userTaskManager.numActiveSessionKeys());
}
Example #20
Source File: UserTaskManagerTest.java From cruise-control with BSD 2-Clause "Simplified" License | 4 votes |
@Test
public void testCreateUserTask() {
  UUID testUserTaskId = UUID.randomUUID();

  UserTaskManager.UUIDGenerator mockUUIDGenerator = EasyMock.mock(UserTaskManager.UUIDGenerator.class);
  EasyMock.expect(mockUUIDGenerator.randomUUID()).andReturn(testUserTaskId).anyTimes();

  HttpSession mockHttpSession = EasyMock.mock(HttpSession.class);
  EasyMock.expect(mockHttpSession.getLastAccessedTime()).andReturn(100L).anyTimes();

  HttpServletRequest mockHttpServletRequest1 = prepareRequest(mockHttpSession, null);
  HttpServletResponse mockHttpServletResponse = EasyMock.mock(HttpServletResponse.class);
  Capture<String> userTaskHeader = Capture.newInstance();
  Capture<String> userTaskHeaderValue = Capture.newInstance();
  mockHttpServletResponse.setHeader(EasyMock.capture(userTaskHeader), EasyMock.capture(userTaskHeaderValue));

  EasyMock.replay(mockUUIDGenerator, mockHttpSession, mockHttpServletResponse);

  OperationFuture future = new OperationFuture("future");
  UserTaskManager userTaskManager =
      new UserTaskManager(1000, 1, TimeUnit.HOURS.toMillis(6), 100, new MockTime(), mockUUIDGenerator);

  // test-case: create user-task based on request and get future
  OperationFuture future1 =
      userTaskManager.getOrCreateUserTask(mockHttpServletRequest1, mockHttpServletResponse, uuid -> future, 0, true, null).get(0);
  Assert.assertEquals(userTaskHeader.getValue(), UserTaskManager.USER_TASK_HEADER_NAME);
  Assert.assertEquals(userTaskHeaderValue.getValue(), testUserTaskId.toString());
  Assert.assertEquals(future, future1);

  EasyMock.reset(mockHttpServletResponse);

  // test-case: get same future back using sessions
  OperationFuture future2 =
      userTaskManager.getOrCreateUserTask(mockHttpServletRequest1, mockHttpServletResponse, uuid -> future, 0, true, null).get(0);
  Assert.assertEquals(userTaskHeader.getValue(), UserTaskManager.USER_TASK_HEADER_NAME);
  Assert.assertEquals(userTaskHeaderValue.getValue(), testUserTaskId.toString());
  Assert.assertEquals(future, future2);

  HttpServletRequest mockHttpServletRequest2 = prepareRequest(mockHttpSession, testUserTaskId.toString());
  EasyMock.reset(mockHttpServletResponse);

  // test-case: get future back using user-task-id
  OperationFuture future3 =
      userTaskManager.getOrCreateUserTask(mockHttpServletRequest2, mockHttpServletResponse, uuid -> future, 0, true, null).get(0);
  Assert.assertEquals(userTaskHeader.getValue(), UserTaskManager.USER_TASK_HEADER_NAME);
  Assert.assertEquals(userTaskHeaderValue.getValue(), testUserTaskId.toString());
  Assert.assertEquals(future, future3);

  EasyMock.reset(mockHttpServletResponse);

  // test-case: for sync task, UserTaskManager does not create mapping between request URL and UUID.
  HttpServletRequest mockHttpServletRequest3 = prepareRequest(null, null, "test_sync_request", Collections.emptyMap());
  OperationFuture future4 =
      userTaskManager.getOrCreateUserTask(mockHttpServletRequest3, mockHttpServletResponse, uuid -> future, 0, false, null).get(0);

  // New async request have session mapping where a session key is mapped to the UUID associated with the request. Such a mapping is
  // not created for sync request. So in the 2 asserts below, we expect that given a http request, we are able to find request associated UUID
  // for async request but not for sync request.
  UUID savedUUID = userTaskManager.getUserTaskId(mockHttpServletRequest3);
  Assert.assertEquals(savedUUID, null);
  Assert.assertEquals(future4, future);

  EasyMock.reset(mockHttpServletResponse);
  OperationFuture future5 =
      userTaskManager.getOrCreateUserTask(mockHttpServletRequest3, mockHttpServletResponse, uuid -> future, 0, true, null).get(0);
  savedUUID = userTaskManager.getUserTaskId(mockHttpServletRequest3);
  Assert.assertNotEquals(savedUUID, null);
  Assert.assertEquals(future5, future);

  userTaskManager.close();
}
Example #21
Source File: KafkaImportApplicationIntegrationTest.java From bpmn.ai with BSD 3-Clause "New" or "Revised" License | 4 votes |
@BeforeClass
public static void setupBeforeClass() throws Exception {
  //System.setProperty("hadoop.home.dir", "C:\\Users\\b60\\Desktop\\hadoop-2.6.0\\hadoop-2.6.0");

  // setup Zookeeper
  zkServer = new EmbeddedZookeeper();
  String zkConnect = ZOOKEEPER_HOST + ":" + zkServer.port();
  zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$);
  ZkUtils zkUtils = ZkUtils.apply(zkClient, false);

  // setup Kafka
  Properties brokerProps = new Properties();
  brokerProps.setProperty("zookeeper.connect", zkConnect);
  brokerProps.setProperty("broker.id", "0");
  brokerProps.setProperty("log.dirs", Files.createTempDirectory("kafka-").toAbsolutePath().toString());
  brokerProps.setProperty("listeners", "PLAINTEXT://" + KAFKA_HOST + ":" + KAFKA_PORT);
  brokerProps.setProperty("offsets.topic.replication.factor", "1");
  KafkaConfig config = new KafkaConfig(brokerProps);
  Time mock = new MockTime();
  kafkaServer = TestUtils.createServer(config, mock);

  // create topic
  AdminUtils.createTopic(zkUtils, TOPIC_PROCESS_INSTANCE, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);
  AdminUtils.createTopic(zkUtils, TOPIC_ACTIVITY_INSTANCE, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);
  AdminUtils.createTopic(zkUtils, TOPIC_VARIABLE_UPDATE, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);

  // setup producer
  Properties producerProps = new Properties();
  producerProps.setProperty("bootstrap.servers", KAFKA_HOST + ":" + KAFKA_PORT);
  producerProps.setProperty("key.serializer", "org.apache.kafka.common.serialization.IntegerSerializer");
  producerProps.setProperty("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
  KafkaProducer<Integer, String> producer = new KafkaProducer<>(producerProps);

  // fill in test data
  try (Stream<String> stream = Files.lines(Paths.get(FILE_STREAM_INPUT_PROCESS_INSTANCE))) {
    stream.forEach(l -> producer.send(new ProducerRecord<>(TOPIC_PROCESS_INSTANCE, 0, 0, l)));
  }
  try (Stream<String> stream = Files.lines(Paths.get(FILE_STREAM_INPUT_ACTIVITY_INSTANCE))) {
    stream.forEach(l -> producer.send(new ProducerRecord<>(TOPIC_ACTIVITY_INSTANCE, 0, 0, l)));
  }
  try (Stream<String> stream = Files.lines(Paths.get(FILE_STREAM_INPUT_VARIABLE_UPDATE))) {
    stream.forEach(l -> producer.send(new ProducerRecord<>(TOPIC_VARIABLE_UPDATE, 0, 0, l)));
  }
}
Example #22
Source File: KafkaProducerApp.java From bpmn.ai with BSD 3-Clause "New" or "Revised" License | 4 votes |
public static void main(String[] args) throws IOException {
  // setup Zookeeper
  zkServer = new EmbeddedZookeeper();
  String zkConnect = ZOOKEEPER_HOST + ":" + zkServer.port();
  zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$);
  ZkUtils zkUtils = ZkUtils.apply(zkClient, false);

  // setup Kafka
  Properties brokerProps = new Properties();
  brokerProps.setProperty("zookeeper.connect", zkConnect);
  brokerProps.setProperty("broker.id", "0");
  brokerProps.setProperty("log.dirs", Files.createTempDirectory("kafka-").toAbsolutePath().toString());
  brokerProps.setProperty("listeners", "PLAINTEXT://" + KAFKA_HOST + ":" + KAFKA_PORT);
  brokerProps.setProperty("offsets.topic.replication.factor", "1");
  KafkaConfig config = new KafkaConfig(brokerProps);
  Time mock = new MockTime();
  kafkaServer = TestUtils.createServer(config, mock);

  // create topic
  AdminUtils.createTopic(zkUtils, TOPIC_PROCESS_INSTANCE, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);
  AdminUtils.createTopic(zkUtils, TOPIC_VARIABLE_UPDATE, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);

  // setup producer
  Properties producerProps = new Properties();
  producerProps.setProperty("bootstrap.servers", KAFKA_HOST + ":" + KAFKA_PORT);
  producerProps.setProperty("key.serializer", "org.apache.kafka.common.serialization.IntegerSerializer");
  producerProps.setProperty("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
  KafkaProducer<Integer, String> producer = new KafkaProducer<>(producerProps);

  // fill in test data
  try (Stream<String> stream = Files.lines(Paths.get(FILE_STREAM_INPUT_PROCESS_INSTANCE))) {
    stream.forEach(l -> producer.send(new ProducerRecord<>(TOPIC_PROCESS_INSTANCE, 0, 0, l)));
  }
  try (Stream<String> stream = Files.lines(Paths.get(FILE_STREAM_INPUT_VARIABLE_UPDATE))) {
    stream.forEach(l -> producer.send(new ProducerRecord<>(TOPIC_VARIABLE_UPDATE, 0, 0, l)));
  }

  System.out.println("Kafka Server is running and listening on " + KAFKA_HOST + ":" + KAFKA_PORT + " ...");
}
Example #23
Source File: App.java From bpmn.ai with BSD 3-Clause "New" or "Revised" License | 4 votes |
public static void main(String[] args) throws IOException {
  // setup Zookeeper
  zkServer = new EmbeddedZookeeper();
  String zkConnect = ZOOKEEPER_HOST + ":" + zkServer.port();
  zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$);
  ZkUtils zkUtils = ZkUtils.apply(zkClient, false);

  // setup Kafka
  Properties brokerProps = new Properties();
  brokerProps.setProperty("zookeeper.connect", zkConnect);
  brokerProps.setProperty("broker.id", "0");
  brokerProps.setProperty("log.dirs", Files.createTempDirectory("kafka-").toAbsolutePath().toString());
  brokerProps.setProperty("listeners", "PLAINTEXT://" + KAFKA_HOST + ":" + KAFKA_PORT);
  brokerProps.setProperty("offsets.topic.replication.factor", "1");
  KafkaConfig config = new KafkaConfig(brokerProps);
  Time mock = new MockTime();
  kafkaServer = TestUtils.createServer(config, mock);

  // create topic
  AdminUtils.createTopic(zkUtils, TOPIC_PROCESS_INSTANCE, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);
  AdminUtils.createTopic(zkUtils, TOPIC_VARIABLE_UPDATE, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);

  // setup producer
  Properties producerProps = new Properties();
  producerProps.setProperty("bootstrap.servers", KAFKA_HOST + ":" + KAFKA_PORT);
  producerProps.setProperty("key.serializer", "org.apache.kafka.common.serialization.IntegerSerializer");
  producerProps.setProperty("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
  KafkaProducer<Integer, String> producer = new KafkaProducer<>(producerProps);

  // fill in test data
  try (Stream<String> stream = Files.lines(Paths.get(FILE_STREAM_INPUT_PROCESS_INSTANCE))) {
    stream.forEach(l -> producer.send(new ProducerRecord<>(TOPIC_PROCESS_INSTANCE, 0, 0, l)));
  }
  try (Stream<String> stream = Files.lines(Paths.get(FILE_STREAM_INPUT_VARIABLE_UPDATE))) {
    stream.forEach(l -> producer.send(new ProducerRecord<>(TOPIC_VARIABLE_UPDATE, 0, 0, l)));
  }
}