scala.collection.mutable.Buffer Java Examples
The following examples show how to use scala.collection.mutable.Buffer from Java code. Each example is taken from an open-source project; the source file and license are noted above each snippet.
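All of the snippets below revolve around the same interop pattern: a java.util.List is wrapped as a scala.collection.mutable.Buffer so it can be passed to a Scala API. A minimal sketch of that pattern in isolation (the class and variable names here are illustrative, not from any of the projects below):

import java.util.Arrays;
import java.util.List;
import scala.collection.JavaConversions;
import scala.collection.mutable.Buffer;

public class BufferInterop {
    public static void main(String[] args) {
        List<String> javaList = Arrays.asList("a", "b", "c");
        // Wraps the Java list; the resulting Buffer is a view, not a copy.
        Buffer<String> scalaBuffer = JavaConversions.asScalaBuffer(javaList);
        System.out.println(scalaBuffer.size());   // 3
        System.out.println(scalaBuffer.apply(0)); // "a"
    }
}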
Example #1
Source File: EmbeddedKafkaServer.java From atlas with Apache License 2.0
private void startKafka() throws IOException, URISyntaxException {
    String kafkaValue = properties.getProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG);
    LOG.info("Starting kafka at {}", kafkaValue);
    URL kafkaAddress = getURL(kafkaValue);

    Properties brokerConfig = properties;
    brokerConfig.setProperty("broker.id", "1");
    brokerConfig.setProperty("host.name", kafkaAddress.getHost());
    brokerConfig.setProperty("port", String.valueOf(kafkaAddress.getPort()));
    brokerConfig.setProperty("log.dirs", constructDir("kafka").getAbsolutePath());
    brokerConfig.setProperty("log.flush.interval.messages", String.valueOf(1));

    // KafkaServer's constructor expects a Scala Buffer of metrics reporters,
    // so the (empty) Java list is converted before the call.
    List<KafkaMetricsReporter> metrics = new ArrayList<>();
    Buffer<KafkaMetricsReporter> metricsReporters =
            scala.collection.JavaConversions.asScalaBuffer(metrics);

    kafkaServer = new KafkaServer(KafkaConfig.fromProps(brokerConfig), new SystemTime(),
            Option.apply(this.getClass().getName()), metricsReporters);
    kafkaServer.startup();
    LOG.info("Embedded kafka server started with broker config {}", brokerConfig);
}
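Note that scala.collection.JavaConversions, used above, is deprecated as of Scala 2.12 and removed in 2.13. A minimal sketch of the same conversion using scala.collection.JavaConverters instead (the same style Example #6 below uses):

import java.util.ArrayList;
import java.util.List;
import scala.collection.JavaConverters;
import scala.collection.mutable.Buffer;

List<KafkaMetricsReporter> metrics = new ArrayList<>();
// Explicit converter object instead of JavaConversions' static wrapper.
Buffer<KafkaMetricsReporter> metricsReporters =
        JavaConverters.asScalaBufferConverter(metrics).asScala();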
Example #2
Source File: EmbeddedKafkaBroker.java From karaf-decanter with Apache License 2.0
private KafkaServer startBroker(Properties props) {
    List<KafkaMetricsReporter> kmrList = new ArrayList<>();
    Buffer<KafkaMetricsReporter> metricsList =
            scala.collection.JavaConversions.asScalaBuffer(kmrList);
    KafkaServer server = new KafkaServer(new KafkaConfig(props), new SystemTime(),
            Option.<String>empty(), metricsList);
    server.startup();
    return server;
}
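A sketch of driving this helper; the property values below are illustrative, though the keys are the standard old-style Kafka broker settings that Example #1 also sets:

Properties props = new Properties();
props.setProperty("zookeeper.connect", "localhost:2181");
props.setProperty("broker.id", "0");
props.setProperty("port", "9092");
props.setProperty("log.dirs", "/tmp/kafka-logs");
KafkaServer broker = startBroker(props);
// ... exercise the broker ...
broker.shutdown();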
Example #3
Source File: KafkaTool.java From Scribengin with GNU Affero General Public License v3.0
/**
 * Re-assign a topic/partition to remainingBrokers.
 * remainingBrokers is a list of the ids of the brokers to which the topic/partition
 * is to be moved; thus if remainingBrokers = [1,2], the topic will be moved to
 * brokers 1 and 2.
 *
 * @see https://kafka.apache.org/documentation.html#basic_ops_cluster_expansion
 * @see https://cwiki.apache.org/confluence/display/KAFKA/Replication+tools#Replicationtools-6.ReassignPartitionsTool
 */
public boolean reassignPartition(String topic, int partition, List<Object> remainingBrokers) {
    ZkClient client = new ZkClient(zkConnects, 10000, 10000, ZKStringSerializer$.MODULE$);
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
    Buffer<Object> seqs = asScalaBuffer(remainingBrokers);
    Map<TopicAndPartition, Seq<Object>> map = new HashMap<>();
    map.put(topicAndPartition, seqs);
    scala.collection.mutable.Map<TopicAndPartition, Seq<Object>> x = asScalaMap(map);
    ReassignPartitionsCommand command = new ReassignPartitionsCommand(client, x);
    return command.reassignPartitions();
}
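Because the method takes a List<Object> (the element type asScalaBuffer must produce for the Scala Seq[Object]), the broker ids have to be boxed explicitly at the call site. A hypothetical invocation moving partition 0 of my-topic to brokers 1 and 2 (kafkaTool is an assumed instance of this class):

// Arrays.<Object>asList boxes the ints and types the list as List<Object>.
List<Object> remainingBrokers = Arrays.<Object>asList(1, 2);
boolean ok = kafkaTool.reassignPartition("my-topic", 0, remainingBrokers);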
Example #4
Source File: KafkaTool.java From Scribengin with GNU Affero General Public License v3.0
public boolean reassignPartitionReplicas(String topic, int partition, Integer... brokerId) {
    ZkClient client = new ZkClient(zkConnects, 10000, 10000, ZKStringSerializer$.MODULE$);
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
    // The cast to Object[] makes Arrays.asList produce a List<Object>
    // rather than a List<Integer>, matching asScalaBuffer's expected element type.
    Buffer<Object> seqs = asScalaBuffer(Arrays.asList((Object[]) brokerId));
    Map<TopicAndPartition, Seq<Object>> map = new HashMap<>();
    map.put(topicAndPartition, seqs);
    ReassignPartitionsCommand command = new ReassignPartitionsCommand(client, asScalaMap(map));
    return command.reassignPartitions();
}
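The varargs signature makes this variant simpler to call than Example #3's, since no manual boxing is needed. A hypothetical invocation (kafkaTool again assumed):

boolean ok = kafkaTool.reassignPartitionReplicas("my-topic", 0, 1, 2);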
Example #5
Source File: ITZipkinReceiver.java From incubator-retired-htrace with Apache License 2.0
@Test
public void testKafkaTransport() throws Exception {
    String topic = "zipkin";

    // Kafka setup
    EmbeddedZookeeper zkServer = new EmbeddedZookeeper(TestZKUtils.zookeeperConnect());
    ZkClient zkClient = new ZkClient(zkServer.connectString(), 30000, 30000, ZKStringSerializer$.MODULE$);
    Properties props = TestUtils.createBrokerConfig(0, TestUtils.choosePort(), false);
    KafkaConfig config = new KafkaConfig(props);
    KafkaServer kafkaServer = TestUtils.createServer(config, new MockTime());
    Buffer<KafkaServer> servers = JavaConversions.asScalaBuffer(Collections.singletonList(kafkaServer));
    TestUtils.createTopic(zkClient, topic, 1, 1, servers, new Properties());
    zkClient.close();
    TestUtils.waitUntilMetadataIsPropagated(servers, topic, 0, 5000);

    // HTrace
    HTraceConfiguration hTraceConfiguration = HTraceConfiguration.fromKeyValuePairs(
        "sampler.classes", "AlwaysSampler",
        "span.receiver.classes", ZipkinSpanReceiver.class.getName(),
        "zipkin.kafka.metadata.broker.list", config.advertisedHostName() + ":" + config.advertisedPort(),
        "zipkin.kafka.topic", topic,
        ZipkinSpanReceiver.TRANSPORT_CLASS_KEY, KafkaTransport.class.getName()
    );

    final Tracer tracer = new Tracer.Builder("test-tracer")
        .tracerPool(new TracerPool("test-tracer-pool"))
        .conf(hTraceConfiguration)
        .build();

    String scopeName = "test-kafka-transport-scope";
    TraceScope traceScope = tracer.newScope(scopeName);
    traceScope.close();
    tracer.close();

    // Kafka consumer
    Properties consumerProps = new Properties();
    consumerProps.put("zookeeper.connect", props.getProperty("zookeeper.connect"));
    consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, "testing.group");
    consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "smallest");
    ConsumerConnector connector =
        kafka.consumer.Consumer.createJavaConsumerConnector(new kafka.consumer.ConsumerConfig(consumerProps));
    Map<String, Integer> topicCountMap = new HashMap<>();
    topicCountMap.put(topic, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> streams = connector.createMessageStreams(topicCountMap);
    ConsumerIterator<byte[], byte[]> it = streams.get(topic).get(0).iterator();

    // Test
    Assert.assertTrue("We should have one message in Kafka", it.hasNext());
    Span span = new Span();
    new TDeserializer(new TBinaryProtocol.Factory()).deserialize(span, it.next().message());
    Assert.assertEquals("The span name should match our scope description", span.getName(), scopeName);

    kafkaServer.shutdown();
}
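One detail worth knowing about asScalaBuffer: the returned Buffer is a live view over the Java list, not a copy, so its mutability is whatever the underlying list allows. The servers buffer above wraps Collections.singletonList, which is unmodifiable, so writes through the Buffer should fail. A small sketch of that behavior (assuming the same JavaConversions wrapper the example uses):

Buffer<KafkaServer> servers =
        JavaConversions.asScalaBuffer(Collections.singletonList(kafkaServer));
servers.apply(0);              // fine: reads delegate to the Java list
// servers.update(0, other);   // would throw UnsupportedOperationException,
//                             // since update delegates to the unmodifiable list's set()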
Example #6
Source File: ScoverageWriter.java From gradle-scoverage with Apache License 2.0
/**
 * Generates all reports from given data.
 *
 * @param sourceDir               directory with project sources
 * @param reportDir               directory for generated reports
 * @param coverage                coverage data
 * @param sourceEncoding          the encoding of the source files
 * @param coverageOutputCobertura switch for Cobertura output
 * @param coverageOutputXML       switch for Scoverage XML output
 * @param coverageOutputHTML      switch for Scoverage HTML output
 * @param coverageDebug           switch for Scoverage debug output
 */
public void write(File sourceDir,
                  File reportDir,
                  Coverage coverage,
                  String sourceEncoding,
                  Boolean coverageOutputCobertura,
                  Boolean coverageOutputXML,
                  Boolean coverageOutputHTML,
                  Boolean coverageDebug) {

    logger.info("[scoverage] Generating scoverage reports...");
    reportDir.mkdirs();

    if (coverageOutputCobertura) {
        new CoberturaXmlWriter(sourceDir, reportDir).write(coverage);
        logger.info("[scoverage] Written Cobertura XML report to "
                + reportDir.getAbsolutePath() + File.separator + "cobertura.xml");
    }

    if (coverageOutputXML) {
        new ScoverageXmlWriter(sourceDir, reportDir, /* debug = */ false).write(coverage);
        logger.info("[scoverage] Written XML report to "
                + reportDir.getAbsolutePath() + File.separator + Constants.XMLReportFilename());
        if (coverageDebug) {
            new ScoverageXmlWriter(sourceDir, reportDir, /* debug = */ true).write(coverage);
            logger.info("[scoverage] Written XML report with debug information to "
                    + reportDir.getAbsolutePath() + File.separator + Constants.XMLReportFilenameWithDebug());
        }
    }

    if (coverageOutputHTML) {
        Buffer<File> sources = JavaConverters.asScalaBufferConverter(Arrays.asList(sourceDir)).asScala();
        new ScoverageHtmlWriter(sources, reportDir, new Some<>(sourceEncoding)).write(coverage);
        logger.info("[scoverage] Written HTML report to "
                + reportDir.getAbsolutePath() + File.separator + "index.html");
    }

    logger.info("[scoverage] Coverage reports completed");
}
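The JavaConverters style in this example still works on Scala 2.13 (though deprecated there); code targeting 2.13+ can instead use scala.jdk.javaapi.CollectionConverters, which is designed for Java callers and skips the intermediate converter object. A sketch of the same conversion under that assumption:

import scala.collection.mutable.Buffer;
import scala.jdk.javaapi.CollectionConverters;

// asScala on a java.util.List returns a scala.collection.mutable.Buffer directly.
Buffer<File> sources = CollectionConverters.asScala(Arrays.asList(sourceDir));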
Example #7
Source File: MySqlAsyncConnection.java From ob1k with Apache License 2.0
public ComposableFuture<QueryResult> sendPreparedStatement(final String query, final List<Object> values) {
    final Buffer<Object> scalaValues = JavaConversions.asScalaBuffer(values);
    return ScalaFutureHelper.from(() -> conn.sendPreparedStatement(query, scalaValues));
}
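Note that the conversion to a Scala Buffer happens eagerly, outside the lambda, so the deferred future body only captures the already-converted value. A hypothetical call (connection is an assumed MySqlAsyncConnection instance; the query and parameters are illustrative):

List<Object> params = Arrays.<Object>asList("john@example.com", 42);
ComposableFuture<QueryResult> result =
        connection.sendPreparedStatement("SELECT id FROM users WHERE email = ? AND age = ?", params);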
Example #8
Source File: MySqlConnectionPool.java From ob1k with Apache License 2.0
@Override
public ComposableFuture<QueryResult> sendPreparedStatement(final String query, final List<Object> values) {
    final Buffer<Object> scalaValues = JavaConversions.asScalaBuffer(values);
    return withMetricsPreparedStatement(
            ScalaFutureHelper.from(() -> _pool.sendPreparedStatement(query, scalaValues)));
}