Java Code Examples for kafka.admin.TopicCommand#createTopic()
The following examples show how to use kafka.admin.TopicCommand#createTopic().
Each example is taken from an open-source project; follow the link above each example to view the original source file and project.
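All of the examples share the same basic pattern: build a TopicCommand.TopicCommandOptions from the familiar command-line flags and pass it to TopicCommand#createTopic() together with a ZooKeeper handle. The sketch below is a minimal illustration of that pattern and is not taken from any of the projects; the ZooKeeper address, timeouts, and topic name are placeholder values, and the ZkUtils-based overload shown applies to the older, ZooKeeper-backed Kafka versions these examples target.

import kafka.admin.TopicCommand;
import kafka.utils.ZkUtils;
import org.apache.kafka.common.security.JaasUtils;

public class CreateTopicSketch {

    public static void main(String[] args) {
        // Placeholder ZooKeeper connection string and timeouts; adjust for your environment.
        ZkUtils zkUtils = ZkUtils.apply("localhost:2181", 30000, 30000, JaasUtils.isZkSecurityEnabled());
        try {
            // Same flag layout the examples below use: --create plus topic, replication, and partition settings.
            TopicCommand.TopicCommandOptions options = new TopicCommand.TopicCommandOptions(
                    new String[]{"--create", "--topic", "example-topic",
                            "--replication-factor", "1", "--partitions", "1"});
            TopicCommand.createTopic(zkUtils, options);
        } finally {
            zkUtils.close();
        }
    }
}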
Example 1
Source File: TestKafkaSink.java From suro with Apache License 2.0 | 6 votes |
@Test
public void testMultithread() throws IOException {
    TopicCommand.createTopic(zk.getZkClient(),
            new TopicCommand.TopicCommandOptions(new String[]{
                    "--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME_MULTITHREAD,
                    "--replication-factor", "2", "--partitions", "1"}));
    String description = "{\n"
            + " \"type\": \"kafka\",\n"
            + " \"client.id\": \"kafkasink\",\n"
            + " \"bootstrap.servers\": \"" + kafkaServer.getBrokerListStr() + "\",\n"
            + " \"acks\": 1\n"
            + "}";
    KafkaSink sink = jsonMapper.readValue(description, new TypeReference<Sink>() {});
    sink.open();
    int msgCount = 10000;
    sendMessages(TOPIC_NAME_MULTITHREAD, sink, msgCount);
    assertTrue(sink.getNumOfPendingMessages() > 0);
    sink.close();
    System.out.println(sink.getStat());
    assertEquals(sink.getNumOfPendingMessages(), 0);
    checkConsumer(TOPIC_NAME_MULTITHREAD, msgCount - (int) sink.droppedRecords.get());
}
Example 2
Source File: KafkaOperatorTestBase.java From attic-apex-malhar with Apache License 2.0 | 6 votes |
public void createTopic(int clusterid, String topicName) {
    String[] args = new String[9];
    args[0] = "--zookeeper";
    args[1] = "localhost:" + TEST_ZOOKEEPER_PORT[clusterid];
    args[2] = "--replication-factor";
    args[3] = "1";
    args[4] = "--partitions";
    if (hasMultiPartition) {
        args[5] = "2";
    } else {
        args[5] = "1";
    }
    args[6] = "--topic";
    args[7] = topicName;
    args[8] = "--create";
    ZkUtils zu = ZkUtils.apply("localhost:" + TEST_ZOOKEEPER_PORT[clusterid], 30000, 30000, false);
    TopicCommand.createTopic(zu, new TopicCommand.TopicCommandOptions(args));
}
Example 3
Source File: KafkaCommand.java From message-queue-client-framework with Apache License 2.0 | 5 votes |
/**
 * <p>Title: createTopic</p>
 * <p>Description: Create a topic</p>
 *
 * @param zookeeperStr ZooKeeper address
 * @param topic        topic name
 * @param replications number of replicas
 * @param partitions   number of partitions
 */
public static void createTopic(String zookeeperStr, String topic, int replications, int partitions) {
    TopicCommand.createTopic(
            ZkUtils.apply(zookeeperStr, sessionTimeout, connectionTimeout, JaasUtils.isZkSecurityEnabled()),
            new TopicCommandOptions(new String[]{
                    "--create", "--topic", topic,
                    "--replication-factor", String.valueOf(replications),
                    "--partitions", String.valueOf(partitions)}));
}
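For reference, a call to the helper above might look like the following; the ZooKeeper address and topic name are illustrative placeholders, and sessionTimeout/connectionTimeout are assumed to be constants defined elsewhere in the KafkaCommand class.

// Hypothetical usage: create "demo-topic" with 1 replica and 4 partitions
// through a ZooKeeper ensemble at localhost:2181.
KafkaCommand.createTopic("localhost:2181", "demo-topic", 1, 4);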
Example 4
Source File: KafkaStarterUtils.java From uReplicator with Apache License 2.0 | 5 votes |
public static void createTopic(String kafkaTopic, int numOfPartitions, String zkStr, String replicatorFactor) {
    // TopicCommand.main() will call System.exit() finally, which will break maven-surefire-plugin
    try {
        String[] args = new String[]{
                "--create", "--zookeeper", zkStr, "--replication-factor", replicatorFactor,
                "--partitions", String.valueOf(numOfPartitions), "--topic", kafkaTopic};
        KafkaZkClient zkClient = KafkaZkClient.apply(zkStr, false, 3000, 3000, Integer.MAX_VALUE,
                Time.SYSTEM, "kafka.server", "SessionExpireListener");
        TopicCommand.TopicCommandOptions opts = new TopicCommand.TopicCommandOptions(args);
        TopicCommand.createTopic(zkClient, opts);
    } catch (TopicExistsException e) {
        // Catch TopicExistsException otherwise it will break maven-surefire-plugin
        System.out.println("Topic already existed");
    }
}
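A hypothetical call to this utility is shown below; the topic name, partition count, and ZooKeeper address are placeholders, and note that the replication factor is passed as a String.

// Hypothetical usage: create "demo-topic" with 4 partitions and a replication factor of 1.
KafkaStarterUtils.createTopic("demo-topic", 4, "localhost:2181", "1");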
Example 5
Source File: KafkaMessageSenderImplTest.java From message-queue-client-framework with Apache License 2.0 | 4 votes |
@Before
public void before() {
    try {
        zkServer = new EmbeddedZookeeper();
        zkConnect = String.format("localhost:%d", zkServer.port());
        ZkUtils zkUtils = ZkUtils.apply(zkConnect, 30000, 30000, JaasUtils.isZkSecurityEnabled());
        zkClient = zkUtils.zkClient();
        Time mock = new SystemTime();
        final Option<File> noFile = scala.Option.apply(null);
        final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
        final Option<Properties> noPropertiesOption = scala.Option.apply(null);
        final Option<String> noStringOption = scala.Option.apply(null);
        kafkaProps = TestUtils.createBrokerConfig(brokerId, zkConnect, false, false, port,
                noInterBrokerSecurityProtocol, noFile, noPropertiesOption, true, false,
                TestUtils.RandomPort(), false, TestUtils.RandomPort(), false,
                TestUtils.RandomPort(), noStringOption, TestUtils.RandomPort());
        kafkaProps.setProperty("auto.create.topics.enable", "true");
        kafkaProps.setProperty("num.partitions", "1");
        // We *must* override this to use the port we allocated (Kafka currently
        // allocates one port that it always uses for ZK).
        kafkaProps.setProperty("zookeeper.connect", this.zkConnect);
        kafkaProps.setProperty("host.name", "localhost");
        kafkaProps.setProperty("port", port + "");
        KafkaConfig config = new KafkaConfig(kafkaProps);
        kafkaServer = TestUtils.createServer(config, mock);
        // create topic
        TopicCommand.TopicCommandOptions options = new TopicCommand.TopicCommandOptions(
                new String[]{"--create", "--topic", topic, "--replication-factor", "1", "--partitions", "1"});
        TopicCommand.createTopic(zkUtils, options);
        List<KafkaServer> servers = new ArrayList<KafkaServer>();
        servers.add(kafkaServer);
        TestUtils.waitUntilMetadataIsPropagated(
                scala.collection.JavaConversions.asScalaBuffer(servers), topic, 0, 5000);
    } catch (Exception e) {
    }
}
Example 6
Source File: ZookeeperBrokersTest.java From message-queue-client-framework with Apache License 2.0 | 4 votes |
@Before
public void before() {
    try {
        zkServer = new EmbeddedZookeeper();
        zkConnect = String.format("localhost:%d", zkServer.port());
        ZkUtils zkUtils = ZkUtils.apply(zkConnect, 30000, 30000, JaasUtils.isZkSecurityEnabled());
        zkClient = zkUtils.zkClient();
        Time mock = new SystemTime();
        final Option<File> noFile = scala.Option.apply(null);
        final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
        final Option<Properties> noPropertiesOption = scala.Option.apply(null);
        final Option<String> noStringOption = scala.Option.apply(null);
        kafkaProps = TestUtils.createBrokerConfig(brokerId, zkConnect, false, false, port,
                noInterBrokerSecurityProtocol, noFile, noPropertiesOption, true, false,
                TestUtils.RandomPort(), false, TestUtils.RandomPort(), false,
                TestUtils.RandomPort(), noStringOption, TestUtils.RandomPort());
        kafkaProps.setProperty("auto.create.topics.enable", "true");
        kafkaProps.setProperty("num.partitions", "1");
        // We *must* override this to use the port we allocated (Kafka currently
        // allocates one port that it always uses for ZK).
        kafkaProps.setProperty("zookeeper.connect", this.zkConnect);
        kafkaProps.setProperty("host.name", "localhost");
        kafkaProps.setProperty("port", port + "");
        KafkaConfig config = new KafkaConfig(kafkaProps);
        kafkaServer = TestUtils.createServer(config, mock);
        // create topic
        TopicCommand.TopicCommandOptions options = new TopicCommand.TopicCommandOptions(
                new String[]{"--create", "--topic", topic, "--replication-factor", "1", "--partitions", "4"});
        TopicCommand.createTopic(zkUtils, options);
        List<KafkaServer> servers = new ArrayList<KafkaServer>();
        servers.add(kafkaServer);
        TestUtils.waitUntilMetadataIsPropagated(
                scala.collection.JavaConversions.asScalaBuffer(servers), topic, 0, 5000);
    } catch (Exception e) {
    }
}
Example 7
Source File: KafkaMessageReceiverImplTest.java From message-queue-client-framework with Apache License 2.0 | 4 votes |
@Before
public void before() {
    try {
        zkServer = new EmbeddedZookeeper();
        zkConnect = String.format("localhost:%d", zkServer.port());
        ZkUtils zkUtils = ZkUtils.apply(zkConnect, 30000, 30000, JaasUtils.isZkSecurityEnabled());
        zkClient = zkUtils.zkClient();
        final Option<File> noFile = scala.Option.apply(null);
        final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
        final Option<Properties> noPropertiesOption = scala.Option.apply(null);
        final Option<String> noStringOption = scala.Option.apply(null);
        kafkaProps = TestUtils.createBrokerConfig(brokerId, zkConnect, false, false, port,
                noInterBrokerSecurityProtocol, noFile, noPropertiesOption, true, false,
                TestUtils.RandomPort(), false, TestUtils.RandomPort(), false,
                TestUtils.RandomPort(), noStringOption, TestUtils.RandomPort());
        kafkaProps.setProperty("auto.create.topics.enable", "true");
        kafkaProps.setProperty("num.partitions", "1");
        // We *must* override this to use the port we allocated (Kafka currently
        // allocates one port that it always uses for ZK).
        kafkaProps.setProperty("zookeeper.connect", this.zkConnect);
        kafkaProps.setProperty("host.name", "localhost");
        kafkaProps.setProperty("port", port + "");
        Properties kafkaProps2 = TestUtils.createBrokerConfig(brokerId + 1, zkConnect, false, false, (port - 1),
                noInterBrokerSecurityProtocol, noFile, noPropertiesOption, true, false,
                TestUtils.RandomPort(), false, TestUtils.RandomPort(), false,
                TestUtils.RandomPort(), noStringOption, TestUtils.RandomPort());
        kafkaProps2.setProperty("auto.create.topics.enable", "true");
        kafkaProps2.setProperty("num.partitions", "1");
        // We *must* override this to use the port we allocated (Kafka currently
        // allocates one port that it always uses for ZK).
        kafkaProps2.setProperty("zookeeper.connect", this.zkConnect);
        kafkaProps2.setProperty("host.name", "localhost");
        kafkaProps2.setProperty("port", (port - 1) + "");
        KafkaConfig config = new KafkaConfig(kafkaProps);
        KafkaConfig config2 = new KafkaConfig(kafkaProps2);
        Time mock = new SystemTime();
        Time mock2 = new SystemTime();
        kafkaServer = TestUtils.createServer(config, mock);
        KafkaServer kafkaServer2 = TestUtils.createServer(config2, mock2);
        // create topic
        TopicCommand.TopicCommandOptions options = new TopicCommand.TopicCommandOptions(
                new String[]{"--create", "--topic", topic, "--replication-factor", "2", "--partitions", "2"});
        TopicCommand.createTopic(zkUtils, options);
        List<KafkaServer> servers = new ArrayList<KafkaServer>();
        servers.add(kafkaServer);
        servers.add(kafkaServer2);
        TestUtils.waitUntilMetadataIsPropagated(
                scala.collection.JavaConversions.asScalaBuffer(servers), topic, 0, 5000);
    } catch (Exception e) {
    }
}
Example 8
Source File: ZookeeperHostsTest.java From message-queue-client-framework with Apache License 2.0 | 4 votes |
@Before
public void before() {
    try {
        zkServer = new EmbeddedZookeeper();
        zkConnect = String.format("localhost:%d", zkServer.port());
        ZkUtils zkUtils = ZkUtils.apply(zkConnect, 30000, 30000, JaasUtils.isZkSecurityEnabled());
        zkClient = zkUtils.zkClient();
        Time mock = new SystemTime();
        final Option<File> noFile = scala.Option.apply(null);
        final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
        final Option<Properties> noPropertiesOption = scala.Option.apply(null);
        final Option<String> noStringOption = scala.Option.apply(null);
        kafkaProps = TestUtils.createBrokerConfig(brokerId, zkConnect, false, false, port,
                noInterBrokerSecurityProtocol, noFile, noPropertiesOption, true, false,
                TestUtils.RandomPort(), false, TestUtils.RandomPort(), false,
                TestUtils.RandomPort(), noStringOption, TestUtils.RandomPort());
        kafkaProps.setProperty("auto.create.topics.enable", "true");
        kafkaProps.setProperty("num.partitions", "1");
        // We *must* override this to use the port we allocated (Kafka currently
        // allocates one port that it always uses for ZK).
        kafkaProps.setProperty("zookeeper.connect", this.zkConnect);
        kafkaProps.setProperty("host.name", "localhost");
        kafkaProps.setProperty("port", port + "");
        KafkaConfig config = new KafkaConfig(kafkaProps);
        kafkaServer = TestUtils.createServer(config, mock);
        // create topic
        TopicCommand.TopicCommandOptions options = new TopicCommand.TopicCommandOptions(
                new String[]{"--create", "--topic", topic, "--replication-factor", "1", "--partitions", "1"});
        TopicCommand.createTopic(zkUtils, options);
        List<KafkaServer> servers = new ArrayList<KafkaServer>();
        servers.add(kafkaServer);
        TestUtils.waitUntilMetadataIsPropagated(
                scala.collection.JavaConversions.asScalaBuffer(servers), topic, 0, 5000);
    } catch (Exception e) {
    }
}
Example 9
Source File: ReceiverWithSpringTest.java From message-queue-client-framework with Apache License 2.0 | 4 votes |
@Before
public void before() {
    try {
        zkServer = new EmbeddedZookeeper();
        zkConnect = String.format("localhost:%d", zkServer.port());
        ZkUtils zkUtils = ZkUtils.apply(zkConnect, 30000, 30000, JaasUtils.isZkSecurityEnabled());
        zkClient = zkUtils.zkClient();
        Time mock = new SystemTime();
        final Option<File> noFile = scala.Option.apply(null);
        final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
        final Option<Properties> noPropertiesOption = scala.Option.apply(null);
        final Option<String> noStringOption = scala.Option.apply(null);
        kafkaProps = TestUtils.createBrokerConfig(brokerId, zkConnect, false, false, port,
                noInterBrokerSecurityProtocol, noFile, noPropertiesOption, true, false,
                TestUtils.RandomPort(), false, TestUtils.RandomPort(), false,
                TestUtils.RandomPort(), noStringOption, TestUtils.RandomPort());
        kafkaProps.setProperty("auto.create.topics.enable", "true");
        kafkaProps.setProperty("num.partitions", "1");
        // We *must* override this to use the port we allocated (Kafka currently
        // allocates one port that it always uses for ZK).
        kafkaProps.setProperty("zookeeper.connect", this.zkConnect);
        kafkaProps.setProperty("host.name", "localhost");
        kafkaProps.setProperty("port", port + "");
        KafkaConfig config = new KafkaConfig(kafkaProps);
        kafkaServer = TestUtils.createServer(config, mock);
        // create topic
        TopicCommand.TopicCommandOptions options = new TopicCommand.TopicCommandOptions(
                new String[]{"--create", "--topic", topic, "--replication-factor", "1", "--partitions", "1"});
        TopicCommand.createTopic(zkUtils, options);
        List<KafkaServer> servers = new ArrayList<KafkaServer>();
        servers.add(kafkaServer);
        TestUtils.waitUntilMetadataIsPropagated(
                scala.collection.JavaConversions.asScalaBuffer(servers), topic, 0, 5000);
    } catch (Exception e) {
    }
}
Example 10
Source File: SenderWithSpringTest.java From message-queue-client-framework with Apache License 2.0 | 4 votes |
@Before
public void before() {
    try {
        zkServer = new EmbeddedZookeeper();
        zkConnect = String.format("localhost:%d", zkServer.port());
        ZkUtils zkUtils = ZkUtils.apply(zkConnect, 30000, 30000, JaasUtils.isZkSecurityEnabled());
        zkClient = zkUtils.zkClient();
        Time mock = new SystemTime();
        final Option<File> noFile = scala.Option.apply(null);
        final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
        final Option<Properties> noPropertiesOption = scala.Option.apply(null);
        final Option<String> noStringOption = scala.Option.apply(null);
        kafkaProps = TestUtils.createBrokerConfig(brokerId, zkConnect, false, false, port,
                noInterBrokerSecurityProtocol, noFile, noPropertiesOption, true, false,
                TestUtils.RandomPort(), false, TestUtils.RandomPort(), false,
                TestUtils.RandomPort(), noStringOption, TestUtils.RandomPort());
        kafkaProps.setProperty("auto.create.topics.enable", "true");
        kafkaProps.setProperty("num.partitions", "1");
        // We *must* override this to use the port we allocated (Kafka currently
        // allocates one port that it always uses for ZK).
        kafkaProps.setProperty("zookeeper.connect", this.zkConnect);
        kafkaProps.setProperty("host.name", "localhost");
        kafkaProps.setProperty("port", port + "");
        KafkaConfig config = new KafkaConfig(kafkaProps);
        kafkaServer = TestUtils.createServer(config, mock);
        // create topic
        TopicCommand.TopicCommandOptions options = new TopicCommand.TopicCommandOptions(
                new String[]{"--create", "--topic", topic, "--replication-factor", "1", "--partitions", "1"});
        TopicCommand.createTopic(zkUtils, options);
        List<KafkaServer> servers = new ArrayList<KafkaServer>();
        servers.add(kafkaServer);
        TestUtils.waitUntilMetadataIsPropagated(
                scala.collection.JavaConversions.asScalaBuffer(servers), topic, 0, 5000);
    } catch (Exception e) {
    }
}
Example 11
Source File: TestKafkaSink.java From suro with Apache License 2.0 | 4 votes |
@Test
public void testConfigBackwardCompatible() throws IOException {
    int numPartitions = 9;
    TopicCommand.createTopic(zk.getZkClient(),
            new TopicCommand.TopicCommandOptions(new String[]{
                    "--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME_BACKWARD_COMPAT,
                    "--replication-factor", "2", "--partitions", Integer.toString(numPartitions)}));
    String keyTopicMap = String.format(" \"keyTopicMap\": {\n"
            + " \"%s\": \"key\"\n"
            + " }", TOPIC_NAME_BACKWARD_COMPAT);
    String description1 = "{\n"
            + " \"type\": \"Kafka\",\n"
            + " \"client.id\": \"kafkasink\",\n"
            + " \"bootstrap.servers\": \"" + kafkaServer.getBrokerListStr() + "\",\n"
            + " \"ack\": 1,\n"
            + " \"compression.type\": \"snappy\",\n"
            + keyTopicMap + "\n"
            + "}";
    String description2 = "{\n"
            + " \"type\": \"Kafka\",\n"
            + " \"client.id\": \"kafkasink\",\n"
            + " \"metadata.broker.list\": \"" + kafkaServer.getBrokerListStr() + "\",\n"
            + " \"request.required.acks\": 1,\n"
            + " \"compression.codec\": \"snappy\",\n"
            + keyTopicMap + "\n"
            + "}";
    // setup sinks, both old and new versions
    ObjectMapper jsonMapper = new DefaultObjectMapper();
    jsonMapper.registerSubtypes(new NamedType(KafkaSink.class, "Kafka"));
    jsonMapper.setInjectableValues(new InjectableValues() {
        @Override
        public Object findInjectableValue(Object valueId, DeserializationContext ctxt,
                                          BeanProperty forProperty, Object beanInstance) {
            if (valueId.equals(KafkaRetentionPartitioner.class.getName())) {
                return new KafkaRetentionPartitioner();
            } else {
                return null;
            }
        }
    });
    KafkaSink sink1 = jsonMapper.readValue(description1, new TypeReference<Sink>() {});
    KafkaSink sink2 = jsonMapper.readValue(description2, new TypeReference<Sink>() {});
    sink1.open();
    sink2.open();
    List<Sink> sinks = new ArrayList<Sink>();
    sinks.add(sink1);
    sinks.add(sink2);
    // setup Kafka consumer (to read back messages)
    ConsumerConnector consumer = kafka.consumer.Consumer.createJavaConsumerConnector(
            createConsumerConfig("localhost:" + zk.getServerPort(), "gropuid"));
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(TOPIC_NAME_BACKWARD_COMPAT, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
    KafkaStream<byte[], byte[]> stream = consumerMap.get(TOPIC_NAME_BACKWARD_COMPAT).get(0);

    // Send 20 test messages, using the old and new Kafka sinks.
    // Retrieve the messages and ensure that they are identical and sent to the same partition.
    Random rand = new Random();
    int messageCount = 20;
    for (int i = 0; i < messageCount; ++i) {
        Map<String, Object> msgMap = new ImmutableMap.Builder<String, Object>()
                .put("key", new Long(rand.nextLong()))
                .put("value", "message:" + i).build();
        // send message to both sinks
        for (Sink sink : sinks) {
            sink.writeTo(new DefaultMessageContainer(
                    new Message(TOPIC_NAME_BACKWARD_COMPAT, jsonMapper.writeValueAsBytes(msgMap)),
                    jsonMapper));
        }
        // read two copies of message back from Kafka and check that partitions and data match
        MessageAndMetadata<byte[], byte[]> msgAndMeta1 = stream.iterator().next();
        MessageAndMetadata<byte[], byte[]> msgAndMeta2 = stream.iterator().next();
        System.out.println("iteration: " + i + " partition1: " + msgAndMeta1.partition());
        System.out.println("iteration: " + i + " partition2: " + msgAndMeta2.partition());
        assertEquals(msgAndMeta1.partition(), msgAndMeta2.partition());
        String msg1Str = new String(msgAndMeta1.message());
        String msg2Str = new String(msgAndMeta2.message());
        System.out.println("iteration: " + i + " message1: " + msg1Str);
        System.out.println("iteration: " + i + " message2: " + msg2Str);
        assertEquals(msg1Str, msg2Str);
    }
    // close sinks
    sink1.close();
    sink2.close();
    // close consumer
    try {
        stream.iterator().next();
        fail(); // there should be no data left to consume
    } catch (ConsumerTimeoutException e) {
        // this is expected
        consumer.shutdown();
    }
}
Example 12
Source File: NewSenderWithSpringTest.java From message-queue-client-framework with Apache License 2.0 | 4 votes |
@Before
public void before() {
    try {
        zkServer = new EmbeddedZookeeper();
        zkConnect = String.format("localhost:%d", zkServer.port());
        ZkUtils zkUtils = ZkUtils.apply(zkConnect, 30000, 30000, JaasUtils.isZkSecurityEnabled());
        zkClient = zkUtils.zkClient();
        Time mock = new SystemTime();
        final Option<File> noFile = scala.Option.apply(null);
        final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
        final Option<Properties> noPropertiesOption = scala.Option.apply(null);
        final Option<String> noStringOption = scala.Option.apply(null);
        kafkaProps = TestUtils.createBrokerConfig(brokerId, zkConnect, false, false, port,
                noInterBrokerSecurityProtocol, noFile, noPropertiesOption, true, false,
                TestUtils.RandomPort(), false, TestUtils.RandomPort(), false,
                TestUtils.RandomPort(), noStringOption, TestUtils.RandomPort());
        kafkaProps.setProperty("auto.create.topics.enable", "true");
        kafkaProps.setProperty("num.partitions", "1");
        // We *must* override this to use the port we allocated (Kafka currently
        // allocates one port that it always uses for ZK).
        kafkaProps.setProperty("zookeeper.connect", this.zkConnect);
        kafkaProps.setProperty("host.name", "localhost");
        kafkaProps.setProperty("port", port + "");
        KafkaConfig config = new KafkaConfig(kafkaProps);
        kafkaServer = TestUtils.createServer(config, mock);
        // create topic
        TopicCommand.TopicCommandOptions options = new TopicCommand.TopicCommandOptions(
                new String[]{"--create", "--topic", topic, "--replication-factor", "1", "--partitions", "1"});
        TopicCommand.createTopic(zkUtils, options);
        List<KafkaServer> servers = new ArrayList<KafkaServer>();
        servers.add(kafkaServer);
        TestUtils.waitUntilMetadataIsPropagated(
                scala.collection.JavaConversions.asScalaBuffer(servers), topic, 0, 5000);
    } catch (Exception e) {
    }
}
Example 13
Source File: KafkaMessageNewSenderPoolTest.java From message-queue-client-framework with Apache License 2.0 | 4 votes |
@Before
public void setUp() throws Exception {
    try {
        zkServer = new EmbeddedZookeeper();
        zkConnect = String.format("localhost:%d", zkServer.port());
        ZkUtils zkUtils = ZkUtils.apply(zkConnect, 30000, 30000, JaasUtils.isZkSecurityEnabled());
        zkClient = zkUtils.zkClient();
        Time mock = new SystemTime();
        final Option<File> noFile = scala.Option.apply(null);
        final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
        final Option<Properties> noPropertiesOption = scala.Option.apply(null);
        final Option<String> noStringOption = scala.Option.apply(null);
        kafkaProps = TestUtils.createBrokerConfig(brokerId, zkConnect, false, false, port,
                noInterBrokerSecurityProtocol, noFile, noPropertiesOption, true, false,
                TestUtils.RandomPort(), false, TestUtils.RandomPort(), false,
                TestUtils.RandomPort(), noStringOption, TestUtils.RandomPort());
        kafkaProps.setProperty("auto.create.topics.enable", "true");
        kafkaProps.setProperty("num.partitions", "1");
        // We *must* override this to use the port we allocated (Kafka currently
        // allocates one port that it always uses for ZK).
        kafkaProps.setProperty("zookeeper.connect", this.zkConnect);
        KafkaConfig config = new KafkaConfig(kafkaProps);
        kafkaServer = TestUtils.createServer(config, mock);
        // create topic
        TopicCommand.TopicCommandOptions options = new TopicCommand.TopicCommandOptions(
                new String[]{"--create", "--topic", topic, "--replication-factor", "1", "--partitions", "1"});
        TopicCommand.createTopic(zkUtils, options);
        List<KafkaServer> servers = new ArrayList<KafkaServer>();
        servers.add(kafkaServer);
        TestUtils.waitUntilMetadataIsPropagated(
                scala.collection.JavaConversions.asScalaBuffer(servers), topic, 0, 5000);
    } catch (Exception e) {
    }
}
Example 14
Source File: KafkaMessageSenderPoolTest.java From message-queue-client-framework with Apache License 2.0 | 4 votes |
@Before
public void before() {
    try {
        zkServer = new EmbeddedZookeeper();
        zkConnect = String.format("localhost:%d", zkServer.port());
        ZkUtils zkUtils = ZkUtils.apply(zkConnect, 30000, 30000, JaasUtils.isZkSecurityEnabled());
        zkClient = zkUtils.zkClient();
        Time mock = new SystemTime();
        final Option<File> noFile = scala.Option.apply(null);
        final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
        final Option<Properties> noPropertiesOption = scala.Option.apply(null);
        final Option<String> noStringOption = scala.Option.apply(null);
        kafkaProps = TestUtils.createBrokerConfig(brokerId, zkConnect, false, false, port,
                noInterBrokerSecurityProtocol, noFile, noPropertiesOption, true, false,
                TestUtils.RandomPort(), false, TestUtils.RandomPort(), false,
                TestUtils.RandomPort(), noStringOption, TestUtils.RandomPort());
        kafkaProps.setProperty("auto.create.topics.enable", "true");
        kafkaProps.setProperty("num.partitions", "1");
        // We *must* override this to use the port we allocated (Kafka currently
        // allocates one port that it always uses for ZK).
        kafkaProps.setProperty("zookeeper.connect", this.zkConnect);
        kafkaProps.setProperty("host.name", "localhost");
        kafkaProps.setProperty("port", port + "");
        KafkaConfig config = new KafkaConfig(kafkaProps);
        kafkaServer = TestUtils.createServer(config, mock);
        // create topic
        TopicCommand.TopicCommandOptions options = new TopicCommand.TopicCommandOptions(
                new String[]{"--create", "--topic", topic, "--replication-factor", "1", "--partitions", "1"});
        TopicCommand.createTopic(zkUtils, options);
        List<KafkaServer> servers = new ArrayList<KafkaServer>();
        servers.add(kafkaServer);
        TestUtils.waitUntilMetadataIsPropagated(
                scala.collection.JavaConversions.asScalaBuffer(servers), topic, 0, 5000);
    } catch (Exception e) {
    }
}
Example 15
Source File: TestKafkaSinkV2.java From suro with Apache License 2.0 | 4 votes |
@Test
public void testMultithread() throws IOException {
    TopicCommand.createTopic(zk.getZkClient(),
            new TopicCommand.TopicCommandOptions(new String[]{
                    "--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME_MULTITHREAD,
                    "--replication-factor", "2", "--partitions", "1"}));
    String description = "{\n"
            + " \"type\": \"kafka\",\n"
            + " \"client.id\": \"kafkasink\",\n"
            + " \"metadata.broker.list\": \"" + kafkaServer.getBrokerListStr() + "\",\n"
            + " \"request.required.acks\": 1,\n"
            + " \"batchSize\": 10,\n"
            + " \"jobQueueSize\": 3\n"
            + "}";
    ObjectMapper jsonMapper = new DefaultObjectMapper();
    jsonMapper.registerSubtypes(new NamedType(KafkaSinkV2.class, "kafka"));
    KafkaSinkV2 sink = jsonMapper.readValue(description, new TypeReference<Sink>() {});
    sink.open();
    int msgCount = 10000;
    for (int i = 0; i < msgCount; ++i) {
        Map<String, Object> msgMap = new ImmutableMap.Builder<String, Object>()
                .put("key", Integer.toString(i))
                .put("value", "message:" + i).build();
        sink.writeTo(new DefaultMessageContainer(
                new Message(TOPIC_NAME_MULTITHREAD, jsonMapper.writeValueAsBytes(msgMap)),
                jsonMapper));
    }
    assertTrue(sink.getNumOfPendingMessages() > 0);
    sink.close();
    System.out.println(sink.getStat());
    assertEquals(sink.getNumOfPendingMessages(), 0);
    ConsumerConnector consumer = kafka.consumer.Consumer.createJavaConsumerConnector(
            createConsumerConfig("localhost:" + zk.getServerPort(), "gropuid_multhread"));
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(TOPIC_NAME_MULTITHREAD, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
    KafkaStream<byte[], byte[]> stream = consumerMap.get(TOPIC_NAME_MULTITHREAD).get(0);
    for (int i = 0; i < msgCount; ++i) {
        stream.iterator().next();
    }
    try {
        stream.iterator().next();
        fail();
    } catch (ConsumerTimeoutException e) {
        // this is expected
        consumer.shutdown();
    }
}
Example 16
Source File: TestKafkaSink.java From suro with Apache License 2.0 | 4 votes |
@Test
public void testDefaultParameters() throws IOException {
    TopicCommand.createTopic(zk.getZkClient(),
            new TopicCommand.TopicCommandOptions(new String[]{
                    "--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME,
                    "--replication-factor", "2", "--partitions", "1"}));
    String description = "{\n"
            + " \"type\": \"kafka\",\n"
            + " \"client.id\": \"kafkasink\",\n"
            + " \"bootstrap.servers\": \"" + kafkaServer.getBrokerListStr() + "\",\n"
            + " \"acks\": 1\n"
            + "}";
    KafkaSink sink = jsonMapper.readValue(description, new TypeReference<Sink>() {});
    sink.open();
    Iterator<Message> msgIterator = new MessageSetReader(createMessageSet(TOPIC_NAME, 2)).iterator();
    while (msgIterator.hasNext()) {
        sink.writeTo(new StringMessage(msgIterator.next()));
    }
    assertTrue(sink.getNumOfPendingMessages() > 0);
    sink.close();
    assertEquals(sink.getNumOfPendingMessages(), 0);
    System.out.println(sink.getStat());
    // get the leader
    Option<Object> leaderOpt = ZkUtils.getLeaderForPartition(zk.getZkClient(), TOPIC_NAME, 0);
    assertTrue("Leader for topic new-topic partition 0 should exist", leaderOpt.isDefined());
    int leader = (Integer) leaderOpt.get();
    KafkaConfig config;
    if (leader == kafkaServer.getServer(0).config().brokerId()) {
        config = kafkaServer.getServer(0).config();
    } else {
        config = kafkaServer.getServer(1).config();
    }
    SimpleConsumer consumer = new SimpleConsumer(config.hostName(), config.port(), 100000, 100000, "clientId");
    FetchResponse response = consumer.fetch(new FetchRequestBuilder().addFetch(TOPIC_NAME, 0, 0, 100000).build());
    List<MessageAndOffset> messageSet = Lists.newArrayList(response.messageSet(TOPIC_NAME, 0).iterator());
    assertEquals("Should have fetched 2 messages", 2, messageSet.size());
    assertEquals(new String(extractMessage(messageSet, 0)), "testMessage" + 0);
    assertEquals(new String(extractMessage(messageSet, 1)), "testMessage" + 1);
}
Example 17
Source File: KafkaMessageNewReceiverPoolTest_4.java From message-queue-client-framework with Apache License 2.0 | 4 votes |
@Before
public void setUp() throws Exception {
    try {
        zkServer = new EmbeddedZookeeper();
        zkConnect = String.format("localhost:%d", zkServer.port());
        ZkUtils zkUtils = ZkUtils.apply(zkConnect, 30000, 30000, JaasUtils.isZkSecurityEnabled());
        zkClient = zkUtils.zkClient();
        final Option<File> noFile = Option.apply(null);
        final Option<SecurityProtocol> noInterBrokerSecurityProtocol = Option.apply(null);
        final Option<Properties> noPropertiesOption = Option.apply(null);
        final Option<String> noStringOption = Option.apply(null);
        kafkaProps = TestUtils.createBrokerConfig(brokerId, zkConnect, false, false, port,
                noInterBrokerSecurityProtocol, noFile, noPropertiesOption, true, false,
                TestUtils.RandomPort(), false, TestUtils.RandomPort(), false,
                TestUtils.RandomPort(), noStringOption, TestUtils.RandomPort());
        kafkaProps.setProperty("auto.create.topics.enable", "true");
        kafkaProps.setProperty("num.partitions", "1");
        // We *must* override this to use the port we allocated (Kafka currently
        // allocates one port that it always uses for ZK).
        kafkaProps.setProperty("zookeeper.connect", this.zkConnect);
        KafkaConfig config = new KafkaConfig(kafkaProps);
        Time mock = new SystemTime();
        kafkaServer = TestUtils.createServer(config, mock);
        // create topic
        TopicCommand.TopicCommandOptions options = new TopicCommand.TopicCommandOptions(
                new String[]{"--create", "--topic", topic, "--replication-factor", "1", "--partitions", "2"});
        TopicCommand.createTopic(zkUtils, options);
        List<KafkaServer> servers = new ArrayList<KafkaServer>();
        servers.add(kafkaServer);
        TestUtils.waitUntilMetadataIsPropagated(
                scala.collection.JavaConversions.asScalaBuffer(servers), topic, 0, 5000);
    } catch (Exception e) {
    }
}
Example 18
Source File: LocalKafkaServer.java From Krackle with Apache License 2.0 | 4 votes |
public void createTopic(String topic) {
    TopicCommandOptions createOpts = new TopicCommandOptions(new String[]{
            "--create", "--zookeeper", "localhost:21818",
            "--replication-factor", "1", "--partition", "1", "--topic", topic});
    TopicCommand.createTopic(zkUtils, createOpts);
}
Example 19
Source File: NewReceiverWithSpringTest.java From message-queue-client-framework with Apache License 2.0 | 4 votes |
@Before
public void before() {
    try {
        zkServer = new EmbeddedZookeeper();
        zkConnect = String.format("localhost:%d", zkServer.port());
        ZkUtils zkUtils = ZkUtils.apply(zkConnect, 30000, 30000, JaasUtils.isZkSecurityEnabled());
        zkClient = zkUtils.zkClient();
        Time mock = new SystemTime();
        final Option<File> noFile = scala.Option.apply(null);
        final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
        final Option<Properties> noPropertiesOption = scala.Option.apply(null);
        final Option<String> noStringOption = scala.Option.apply(null);
        kafkaProps = TestUtils.createBrokerConfig(brokerId, zkConnect, false, false, port,
                noInterBrokerSecurityProtocol, noFile, noPropertiesOption, true, false,
                TestUtils.RandomPort(), false, TestUtils.RandomPort(), false,
                TestUtils.RandomPort(), noStringOption, TestUtils.RandomPort());
        kafkaProps.setProperty("auto.create.topics.enable", "true");
        kafkaProps.setProperty("num.partitions", "1");
        // We *must* override this to use the port we allocated (Kafka currently
        // allocates one port that it always uses for ZK).
        kafkaProps.setProperty("zookeeper.connect", this.zkConnect);
        kafkaProps.setProperty("host.name", "localhost");
        kafkaProps.setProperty("port", port + "");
        KafkaConfig config = new KafkaConfig(kafkaProps);
        kafkaServer = TestUtils.createServer(config, mock);
        // create topic
        TopicCommand.TopicCommandOptions options = new TopicCommand.TopicCommandOptions(
                new String[]{"--create", "--topic", topic, "--replication-factor", "1", "--partitions", "1"});
        TopicCommand.createTopic(zkUtils, options);
        List<KafkaServer> servers = new ArrayList<KafkaServer>();
        servers.add(kafkaServer);
        TestUtils.waitUntilMetadataIsPropagated(
                scala.collection.JavaConversions.asScalaBuffer(servers), topic, 0, 5000);
    } catch (Exception e) {
    }
}
Example 20
Source File: SenderTest.java From message-queue-client-framework with Apache License 2.0 | 4 votes |
@Before
public void before() {
    try {
        zkServer = new EmbeddedZookeeper();
        zkConnect = String.format("localhost:%d", zkServer.port());
        ZkUtils zkUtils = ZkUtils.apply(zkConnect, 30000, 30000, JaasUtils.isZkSecurityEnabled());
        zkClient = zkUtils.zkClient();
        Time mock = new SystemTime();
        final Option<File> noFile = scala.Option.apply(null);
        final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
        final Option<Properties> noPropertiesOption = scala.Option.apply(null);
        final Option<String> noStringOption = scala.Option.apply(null);
        kafkaProps = TestUtils.createBrokerConfig(brokerId, zkConnect, false, false, port,
                noInterBrokerSecurityProtocol, noFile, noPropertiesOption, true, false,
                TestUtils.RandomPort(), false, TestUtils.RandomPort(), false,
                TestUtils.RandomPort(), noStringOption, TestUtils.RandomPort());
        kafkaProps.setProperty("auto.create.topics.enable", "true");
        kafkaProps.setProperty("num.partitions", "1");
        // We *must* override this to use the port we allocated (Kafka currently
        // allocates one port that it always uses for ZK).
        kafkaProps.setProperty("zookeeper.connect", this.zkConnect);
        kafkaProps.setProperty("host.name", "localhost");
        kafkaProps.setProperty("port", port + "");
        KafkaConfig config = new KafkaConfig(kafkaProps);
        kafkaServer = TestUtils.createServer(config, mock);
        // create topic
        TopicCommand.TopicCommandOptions options = new TopicCommand.TopicCommandOptions(
                new String[]{"--create", "--topic", topic, "--replication-factor", "1", "--partitions", "1"});
        TopicCommand.createTopic(zkUtils, options);
        List<KafkaServer> servers = new ArrayList<KafkaServer>();
        servers.add(kafkaServer);
        TestUtils.waitUntilMetadataIsPropagated(
                scala.collection.JavaConversions.asScalaBuffer(servers), topic, 0, 5000);
    } catch (Exception e) {
    }
}