org.apache.kafka.common.protocol.SecurityProtocol Java Examples
The following examples show how to use
org.apache.kafka.common.protocol.SecurityProtocol.
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example. You can also check out the related API usage in the sidebar.
Example #1
Source File: KafkaAssignmentGenerator.java From kafka-assigner with Apache License 2.0 | 6 votes |
/**
 * Prints every broker currently registered in ZooKeeper to stdout as a JSON array.
 *
 * <p>Each entry carries the broker id, the host/port of its PLAINTEXT endpoint, and
 * the rack if one is defined.
 *
 * @param zkUtils ZooKeeper helper used to enumerate the cluster's brokers
 * @throws JSONException if assembling the JSON output fails
 */
private static void printCurrentBrokers(ZkUtils zkUtils) throws JSONException {
    JSONArray brokerArray = new JSONArray();
    for (Broker broker : JavaConversions.seqAsJavaList(zkUtils.getAllBrokersInCluster())) {
        // NOTE(review): assumes each broker exposes a PLAINTEXT listener — confirm for secured clusters.
        BrokerEndPoint endPoint = broker.getBrokerEndPoint(SecurityProtocol.PLAINTEXT);
        JSONObject entry = new JSONObject();
        entry.put("id", broker.id());
        entry.put("host", endPoint.host());
        entry.put("port", endPoint.port());
        if (broker.rack().isDefined()) {
            entry.put("rack", broker.rack().get());
        }
        brokerArray.put(entry);
    }
    System.out.println("CURRENT BROKERS:");
    System.out.println(brokerArray.toString());
}
Example #2
Source File: KafkaAssignmentGenerator.java From kafka-assigner with Apache License 2.0 | 6 votes |
/**
 * Resolves broker hostnames to broker ids by consulting ZooKeeper.
 *
 * @param zkUtils ZooKeeper helper used to enumerate the cluster's brokers
 * @param brokerHostnameSet hostnames to resolve (matched against each broker's
 *     PLAINTEXT endpoint host)
 * @param checkPresence if true, fail when any requested hostname matched no broker
 * @return the ids of all brokers whose PLAINTEXT host is in {@code brokerHostnameSet}
 * @throws IllegalArgumentException if {@code checkPresence} and a hostname was not found
 */
private static Set<Integer> brokerHostnamesToBrokerIds(
        ZkUtils zkUtils, Set<String> brokerHostnameSet, boolean checkPresence) {
    List<Broker> brokers = JavaConversions.seqAsJavaList(zkUtils.getAllBrokersInCluster());
    Set<Integer> brokerIdSet = Sets.newHashSet();
    // Track which requested hostnames actually matched a broker. Comparing set SIZES
    // (the previous check) is wrong in both directions: two brokers sharing one
    // requested hostname made the sizes differ even though every hostname was found,
    // and could mask a genuinely missing hostname by inflating the id count.
    Set<String> matchedHostnames = Sets.newHashSet();
    for (Broker broker : brokers) {
        BrokerEndPoint endpoint = broker.getBrokerEndPoint(SecurityProtocol.PLAINTEXT);
        if (brokerHostnameSet.contains(endpoint.host())) {
            brokerIdSet.add(broker.id());
            matchedHostnames.add(endpoint.host());
        }
    }
    Preconditions.checkArgument(!checkPresence
            || matchedHostnames.containsAll(brokerHostnameSet),
            "Some hostnames could not be found! We found: " + brokerIdSet);
    return brokerIdSet;
}
Example #3
Source File: ParserTopologyCLI.java From metron with Apache License 2.0 | 6 votes |
private static Optional<String> getSecurityProtocol(Optional<String> protocol, List<Map<String, Object>> spoutConfig) { Optional<String> ret = protocol; if(ret.isPresent() && protocol.get().equalsIgnoreCase(SecurityProtocol.PLAINTEXT.name)) { ret = Optional.empty(); } if(!ret.isPresent()) { // Need to look through spoutConfig for any non-plaintext String spoutConfigSp = null; for (Map<String, Object> config: spoutConfig) { String configSp = (String) config.get(KafkaUtils.SECURITY_PROTOCOL); if (configSp != null && !SecurityProtocol.PLAINTEXT.name.equals(configSp)) { // We have a winner spoutConfigSp = configSp; } else if (configSp != null) { // Use something explicitly defined. spoutConfigSp = configSp; } } ret = Optional.ofNullable(spoutConfigSp); } if(ret.isPresent() && ret.get().equalsIgnoreCase(SecurityProtocol.PLAINTEXT.name)) { ret = Optional.empty(); } return ret; }
Example #4
Source File: PepperBoxKafkaSampler.java From pepper-box with Apache License 2.0 | 5 votes |
/**
 * Registers every sampler parameter with its default value so JMeter can render
 * the configuration UI pre-populated.
 *
 * @return the full set of default arguments for this sampler
 */
@Override
public Arguments getDefaultParameters() {
    Arguments defaultParameters = new Arguments();
    // Core producer wiring: brokers, topic, serializers.
    defaultParameters.addArgument(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, ProducerKeys.BOOTSTRAP_SERVERS_CONFIG_DEFAULT);
    defaultParameters.addArgument(ProducerKeys.ZOOKEEPER_SERVERS, ProducerKeys.ZOOKEEPER_SERVERS_DEFAULT);
    defaultParameters.addArgument(ProducerKeys.KAFKA_TOPIC_CONFIG, ProducerKeys.KAFKA_TOPIC_CONFIG_DEFAULT);
    defaultParameters.addArgument(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ProducerKeys.KEY_SERIALIZER_CLASS_CONFIG_DEFAULT);
    defaultParameters.addArgument(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ProducerKeys.VALUE_SERIALIZER_CLASS_CONFIG_DEFAULT);
    // Batching / throughput tuning knobs.
    defaultParameters.addArgument(ProducerConfig.COMPRESSION_TYPE_CONFIG, ProducerKeys.COMPRESSION_TYPE_CONFIG_DEFAULT);
    defaultParameters.addArgument(ProducerConfig.BATCH_SIZE_CONFIG, ProducerKeys.BATCH_SIZE_CONFIG_DEFAULT);
    defaultParameters.addArgument(ProducerConfig.LINGER_MS_CONFIG, ProducerKeys.LINGER_MS_CONFIG_DEFAULT);
    defaultParameters.addArgument(ProducerConfig.BUFFER_MEMORY_CONFIG, ProducerKeys.BUFFER_MEMORY_CONFIG_DEFAULT);
    defaultParameters.addArgument(ProducerConfig.ACKS_CONFIG, ProducerKeys.ACKS_CONFIG_DEFAULT);
    defaultParameters.addArgument(ProducerConfig.SEND_BUFFER_CONFIG, ProducerKeys.SEND_BUFFER_CONFIG_DEFAULT);
    defaultParameters.addArgument(ProducerConfig.RECEIVE_BUFFER_CONFIG, ProducerKeys.RECEIVE_BUFFER_CONFIG_DEFAULT);
    // Security protocol defaults to PLAINTEXT; Kerberos/SSL sections below are opt-in.
    defaultParameters.addArgument(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.PLAINTEXT.name);
    // Message-shape settings (keyed messages and placeholder keys/values).
    defaultParameters.addArgument(PropsKeys.KEYED_MESSAGE_KEY, PropsKeys.KEYED_MESSAGE_DEFAULT);
    defaultParameters.addArgument(PropsKeys.MESSAGE_KEY_PLACEHOLDER_KEY, PropsKeys.MSG_KEY_PLACEHOLDER);
    defaultParameters.addArgument(PropsKeys.MESSAGE_VAL_PLACEHOLDER_KEY, PropsKeys.MSG_PLACEHOLDER);
    // Kerberos (SASL) settings — disabled by default.
    defaultParameters.addArgument(ProducerKeys.KERBEROS_ENABLED, ProducerKeys.FLAG_NO);
    defaultParameters.addArgument(ProducerKeys.JAVA_SEC_AUTH_LOGIN_CONFIG, ProducerKeys.JAVA_SEC_AUTH_LOGIN_CONFIG_DEFAULT);
    defaultParameters.addArgument(ProducerKeys.JAVA_SEC_KRB5_CONFIG, ProducerKeys.JAVA_SEC_KRB5_CONFIG_DEFAULT);
    defaultParameters.addArgument(ProducerKeys.SASL_KERBEROS_SERVICE_NAME, ProducerKeys.SASL_KERBEROS_SERVICE_NAME_DEFAULT);
    defaultParameters.addArgument(ProducerKeys.SASL_MECHANISM, ProducerKeys.SASL_MECHANISM_DEFAULT);
    // SSL settings — disabled by default; placeholder strings prompt the user in the UI.
    defaultParameters.addArgument(ProducerKeys.SSL_ENABLED, ProducerKeys.FLAG_NO);
    defaultParameters.addArgument(SslConfigs.SSL_KEY_PASSWORD_CONFIG, "<Key Password>");
    defaultParameters.addArgument(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, "<Keystore Location>");
    defaultParameters.addArgument(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, "<Keystore Password>");
    defaultParameters.addArgument(SslConfigs.SSL_KEYSTORE_TYPE_CONFIG, SslConfigs.DEFAULT_SSL_KEYSTORE_TYPE);
    defaultParameters.addArgument(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, "<Truststore Location>");
    defaultParameters.addArgument(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, "<Truststore Password>");
    defaultParameters.addArgument(SslConfigs.SSL_TRUSTSTORE_TYPE_CONFIG, SslConfigs.DEFAULT_SSL_TRUSTSTORE_TYPE);
    return defaultParameters;
}
Example #5
Source File: ZkConsumerCommand.java From azeroth with Apache License 2.0 | 5 votes |
/**
 * Lists every broker currently registered in the cluster.
 *
 * @return one {@code BrokerInfo} (id, host, port) per broker, read from each
 *     broker's PLAINTEXT endpoint node
 */
public List<BrokerInfo> fetchAllBrokers() {
    List<BrokerInfo> brokerInfos = new ArrayList<>();
    Seq<Broker> brokerSeq = zkUtils.getAllBrokersInCluster();
    // Convert the Scala Seq to a Scala List and walk it with its own iterator.
    for (Iterator<Broker> it = brokerSeq.toList().iterator(); it.hasNext(); ) {
        Node node = it.next().getNode(SecurityProtocol.PLAINTEXT);
        brokerInfos.add(new BrokerInfo(node.idString(), node.host(), node.port()));
    }
    return brokerInfos;
}
Example #6
Source File: TestUtil09.java From datacollector with Apache License 2.0 | 5 votes |
/**
 * Builds the {@link Properties} for an embedded test Kafka broker.
 *
 * @param port port the broker should listen on
 * @param zkConnect ZooKeeper connection string
 * @param autoCreateTopic whether the broker auto-creates topics on first use
 * @param numPartitions default partition count for new topics
 * @return broker configuration ready to start an embedded server
 */
public static Properties createKafkaConfig(int port, String zkConnect, boolean autoCreateTopic,
    int numPartitions) {
  // "none" sentinels for the optional Scala parameters below.
  final Option<File> noFile = scala.Option.apply(null);
  final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
  // Positional arguments follow kafka.utils.TestUtils.createBrokerConfig.
  // NOTE(review): argument order is version-specific — confirm against the
  // TestUtils signature of the Kafka version on the classpath.
  Properties props = TestUtils.createBrokerConfig(
      0, zkConnect, false, false, port, noInterBrokerSecurityProtocol, noFile, true, false,
      TestUtils.RandomPort(), false, TestUtils.RandomPort(), false, TestUtils.RandomPort());
  props.setProperty("auto.create.topics.enable", String.valueOf(autoCreateTopic));
  props.setProperty("num.partitions", String.valueOf(numPartitions));
  // Small max message size — presumably to make size-limit failures easy to trigger
  // in tests; TODO confirm with the tests that use this config.
  props.setProperty("message.max.bytes", "500");
  return props;
}
Example #7
Source File: KafkaEmbeddedRule.java From devicehive-java-server with Apache License 2.0 | 5 votes |
/**
 * Starts an embedded ZooKeeper and a single embedded Kafka broker, then creates
 * every topic in {@code this.topics} that does not already exist.
 *
 * <p>Ports are taken from the {@code zookeeper.port} / {@code kafka.port} system
 * properties when set and non-empty, otherwise the class defaults are used.
 */
@Override
protected void before() throws Throwable {
    int zkConnectionTimeout = 6000;
    int zkSessionTimeout = 6000;

    // Start embedded ZooKeeper on the configured (or default) port.
    int zookeeperPort = Optional.ofNullable(System.getProperty("zookeeper.port"))
            .filter(s -> !s.isEmpty())
            .map(Integer::parseInt)
            .orElse(ZOOKEEPER_DEFAULT_PORT);
    this.zookeeper = new EmbeddedZookeeperInternal(zookeeperPort);
    this.zkConnect = "127.0.0.1:" + this.zookeeper.getPort();
    this.zookeeperClient = new ZkClient(this.zkConnect, zkSessionTimeout, zkConnectionTimeout,
            ZKStringSerializer$.MODULE$);

    // Start the embedded Kafka broker against that ZooKeeper.
    int kafkaPort = Optional.ofNullable(System.getProperty("kafka.port"))
            .filter(s -> !s.isEmpty())
            .map(Integer::parseInt)
            .orElse(KAFKA_DEFAULT_PORT);
    // NOTE(review): positional arguments follow kafka.utils.TestUtils.createBrokerConfig
    // and are version-specific — confirm against the Kafka version on the classpath.
    Properties brokerConfigProperties = TestUtils.createBrokerConfig(0, this.zkConnect,
            this.controlledShutdown, true, kafkaPort,
            scala.Option.<SecurityProtocol>apply(null),
            scala.Option.<File>apply(null),
            scala.Option.<Properties>apply(null),
            true, false, 0, false, 0, false, 0, scala.Option.<String>apply(null));
    // Short socket timeouts so test failures surface quickly.
    brokerConfigProperties.setProperty("replica.socket.timeout.ms", "1000");
    brokerConfigProperties.setProperty("controller.socket.timeout.ms", "1000");
    // Single-broker cluster: the offsets topic cannot be replicated further.
    brokerConfigProperties.setProperty("offsets.topic.replication.factor", "1");
    this.kafkaServer = TestUtils.createServer(new KafkaConfig(brokerConfigProperties),
            SystemTime$.MODULE$);

    // Pre-create the requested topics (replication factor 1, default topic config).
    ZkUtils zkUtils = new ZkUtils(this.zookeeperClient, null, false);
    Properties properties = new Properties();
    for (String topic : this.topics) {
        if (!AdminUtils.topicExists(zkUtils, topic)) {
            AdminUtils.createTopic(zkUtils, topic, partitions, 1, properties, null);
        }
    }
}
Example #8
Source File: MiniKafkaCluster.java From AthenaX with Apache License 2.0 | 4 votes |
/**
 * Returns the actual TCP port bound by the PLAINTEXT listener of the
 * {@code index}-th broker in this mini cluster.
 *
 * @param index position of the broker in the internal server list
 * @return the bound PLAINTEXT port of that broker's socket server
 */
public int getKafkaServerPort(int index) {
    return kafkaServer.get(index).socketServer().boundPort(SecurityProtocol.PLAINTEXT);
}