org.apache.kafka.clients.producer.RecordMetadata Java Examples
The following examples show how to use
org.apache.kafka.clients.producer.RecordMetadata.
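A RecordMetadata is the broker's acknowledgement for a produced record: it carries the topic, partition, offset, and (where available) timestamp that were assigned to it. Before the examples, here is a minimal sketch of the two usual ways to obtain it — blocking on the Future returned by send(), or receiving it in a Callback. The broker address and the topic name "demo" are placeholder assumptions:

import java.util.Properties;
import java.util.concurrent.Future;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.StringSerializer;

public class RecordMetadataDemo {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumed broker address
        props.put("key.serializer", StringSerializer.class.getName());
        props.put("value.serializer", StringSerializer.class.getName());

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            // Synchronous: block on the Future until the broker acknowledges the record.
            Future<RecordMetadata> future = producer.send(new ProducerRecord<>("demo", "key", "value"));
            RecordMetadata metadata = future.get();
            System.out.printf("topic=%s partition=%d offset=%d%n",
                    metadata.topic(), metadata.partition(), metadata.offset());

            // Asynchronous: the same metadata is delivered to a Callback instead.
            producer.send(new ProducerRecord<>("demo", "key", "value"), (md, e) -> {
                if (e != null) {
                    e.printStackTrace();
                } else {
                    System.out.println("acked at offset " + md.offset());
                }
            });
        }
    }
}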
Example #1
Source File: AsyncProducer.java From apicurio-registry with Apache License 2.0
@Override
public CompletableFuture<RecordMetadata> apply(ProducerRecord<K, V> record) {
    CompletableFuture<RecordMetadata> result = null;
    try {
        KafkaProducer<K, V> producer = getProducer();
        result = new CFC(producer);
        producer.send(record, (CFC) result);
    } catch (Exception e) {
        if (result != null) {
            ((CFC) result).onCompletion(null, e);
        } else {
            result = new CompletableFuture<>();
            result.completeExceptionally(e);
        }
    }
    return result;
}
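The CFC type above is defined elsewhere in apicurio-registry and is not shown on this page. Judging only from how it is used — constructed around the producer, passed to send() as the Callback, and returned as the CompletableFuture — a minimal sketch of such an adapter might look like this (the class body is an assumption, not the project's actual code):

import java.util.concurrent.CompletableFuture;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.RecordMetadata;

class CFC extends CompletableFuture<RecordMetadata> implements Callback {

    private final Producer<?, ?> producer; // mirrored from the constructor call above; presumably used elsewhere

    CFC(Producer<?, ?> producer) {
        this.producer = producer;
    }

    @Override
    public void onCompletion(RecordMetadata metadata, Exception exception) {
        // Bridge the producer callback into the future: metadata on success, exception on failure.
        if (exception != null) {
            completeExceptionally(exception);
        } else {
            complete(metadata);
        }
    }
}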
Example #2
Source File: TopicProducer.java From ksql-fork-with-deep-learning-function with Apache License 2.0
/**
 * Topic topicName will be automatically created if it doesn't exist.
 * @param topicName
 * @param recordsToPublish
 * @param schema
 * @return
 * @throws InterruptedException
 * @throws TimeoutException
 * @throws ExecutionException
 */
public Map<String, RecordMetadata> produceInputData(String topicName,
                                                    Map<String, GenericRow> recordsToPublish,
                                                    Schema schema)
        throws InterruptedException, TimeoutException, ExecutionException {

    KafkaProducer<String, GenericRow> producer =
            new KafkaProducer<>(producerConfig, new StringSerializer(), new KsqlJsonSerializer(schema));

    Map<String, RecordMetadata> result = new HashMap<>();
    for (Map.Entry<String, GenericRow> recordEntry : recordsToPublish.entrySet()) {
        String key = recordEntry.getKey();
        ProducerRecord<String, GenericRow> producerRecord =
                new ProducerRecord<>(topicName, key, recordEntry.getValue());
        Future<RecordMetadata> recordMetadataFuture = producer.send(producerRecord);
        result.put(key, recordMetadataFuture.get(TEST_RECORD_FUTURE_TIMEOUT_MS, TimeUnit.MILLISECONDS));
    }
    producer.close();
    return result;
}
Example #3
Source File: CounterProviderAVRO.java From jMetalSP with MIT License
public void run() {
    int count = 0;
    long startTime = System.currentTimeMillis();
    DataSerializer<Counter> counterSerializer = new DataSerializer();
    while (true) {
        Counter counter = new Counter(count);
        byte[] aux = counterSerializer.serializeMessage(counter, "avsc/Counter.avsc");
        Future<RecordMetadata> send =
                producer.send(new ProducerRecord<Integer, byte[]>(topic, count, aux));
        System.out.println("Kafka sent: " + count);
        count++;
        try {
            Thread.sleep(5000);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }
}
Example #4
Source File: KafkaSinglePortExactlyOnceOutputOperator.java From attic-apex-malhar with Apache License 2.0
protected void sendTuple(T tuple) {
    if (alreadyInKafka(tuple)) {
        return;
    }
    getProducer().send(new ProducerRecord<>(getTopic(), key, tuple), new Callback() {
        public void onCompletion(RecordMetadata metadata, Exception e) {
            if (e != null) {
                logger.info("Writing to Kafka failed with an exception {}", e.getMessage());
                throw new RuntimeException(e);
            }
        }
    });
}
Example #5
Source File: MessageProcessor.java From DBus with Apache License 2.0
/**
 * Sends stat info to the statistic topic without waiting for the result;
 * success or failure is only logged.
 * @param message
 */
private void sendTableStatInfo(StatMessage message) {
    String key = String.format("%s.%s.%s.%s.%s", message.getDsName(), message.getSchemaName(),
            message.getTableName(), message.getType(), message.getTxTimeMS());
    String value = message.toJSONString();

    Callback callback = new Callback() {
        @Override
        public void onCompletion(RecordMetadata ignored, Exception e) {
            if (e != null) {
                logger.error(String.format("Send statistic FAIL: toTopic=%s, key=%s", statTopic, key));
            } else {
                logger.info(String.format("Send statistic successful: toTopic=%s, key=(%s)", statTopic, key));
            }
        }
    };
    Future<RecordMetadata> result = producer.send(new ProducerRecord<>(statTopic, key, value), callback);
}
Example #6
Source File: AuditEventKafkaSender.java From singer with Apache License 2.0
@Override
public void onCompletion(RecordMetadata recordMetadata, Exception e) {
    try {
        if (e == null) {
            OpenTsdbMetricConverter.incr(
                    LoggingAuditClientMetrics.AUDIT_CLIENT_SENDER_KAFKA_EVENTS_ACKED, 1,
                    "host=" + host, "stage=" + stage.toString(),
                    "logName=" + event.getLoggingAuditHeaders().getLogName());
            // If the send succeeded, remove the event from eventTriedCount if it was added;
            // LoggingAuditHeaders uniquely identifies an event.
            eventTriedCount.remove(event.getLoggingAuditHeaders());
            // Likewise, remove the partition from badPartitions if it was added.
            badPartitions.remove(recordMetadata.partition());
        } else {
            checkAndEnqueueWhenSendFailed();
        }
    } catch (Throwable t) {
        LOG.warn("Exception thrown in the callback. Dropping this event {}", event, t);
        OpenTsdbMetricConverter.incr(
                LoggingAuditClientMetrics.AUDIT_CLIENT_SENDER_KAFKA_CALLBACK_EXCEPTION, 1,
                "host=" + host, "stage=" + stage.toString(), "topic=" + topic);
    }
}
Example #7
Source File: ProjectTopologyService.java From DBus with Apache License 2.0
public void rerunTopology(String topologyCode, String ctrlMsg) {
    KafkaProducer<String, byte[]> producer = null;
    try {
        String topic = StringUtils.joinWith("_", topologyCode, "ctrl");
        Properties props = zkService.getProperties(KeeperConstants.KEEPER_CTLMSG_PRODUCER_CONF);
        Properties globalConf = zkService.getProperties(KeeperConstants.GLOBAL_CONF);
        props.setProperty(GLOBAL_CONF_KEY_BOOTSTRAP_SERVERS,
                globalConf.getProperty(GLOBAL_CONF_KEY_BOOTSTRAP_SERVERS));
        if (StringUtils.equals(SecurityConfProvider.getSecurityConf(zkService),
                Constants.SECURITY_CONFIG_TRUE_VALUE)) {
            props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
        }
        producer = new KafkaProducer<>(props);
        producer.send(new ProducerRecord<String, byte[]>(topic, ctrlMsg.getBytes()), new Callback() {
            @Override
            public void onCompletion(RecordMetadata metadata, Exception exception) {
            }
        });
    } catch (Exception e) {
        throw new RuntimeException(e);
    } finally {
        if (producer != null) {
            producer.close();
        }
    }
}
Example #8
Source File: IntegrationTestHarness.java From ksql-fork-with-deep-learning-function with Apache License 2.0
/**
 * Topic topicName will be automatically created if it doesn't exist.
 * @param topicName
 * @param recordsToPublish
 * @param timestamp
 * @return
 * @throws InterruptedException
 * @throws TimeoutException
 * @throws ExecutionException
 */
public Map<String, RecordMetadata> produceData(String topicName,
                                               Map<String, GenericRow> recordsToPublish,
                                               Serializer<GenericRow> serializer,
                                               Long timestamp)
        throws InterruptedException, TimeoutException, ExecutionException {

    createTopic(topicName);
    Properties producerConfig = properties();
    KafkaProducer<String, GenericRow> producer =
            new KafkaProducer<>(producerConfig, new StringSerializer(), serializer);

    Map<String, RecordMetadata> result = new HashMap<>();
    for (Map.Entry<String, GenericRow> recordEntry : recordsToPublish.entrySet()) {
        String key = recordEntry.getKey();
        Future<RecordMetadata> recordMetadataFuture =
                producer.send(buildRecord(topicName, timestamp, recordEntry, key));
        result.put(key, recordMetadataFuture.get(TEST_RECORD_FUTURE_TIMEOUT_MS, TimeUnit.MILLISECONDS));
    }
    producer.close();
    return result;
}
Example #9
Source File: KafkaEventSenderTest.java From stream-registry with Apache License 2.0
@Test
public void correlatorSuccess() {
    var correlator = mock(EventCorrelator.class);
    var correlationStrategy = new CorrelationStrategyImpl(correlator);
    var underTest = new KafkaEventSender(config, correlationStrategy, converter, producer);
    when(correlator.register(any())).thenReturn("correlationId");

    var result = underTest.send(event);

    verify(correlator).register(result);
    verify(producer).send(recordCaptor.capture(), callbackCaptor.capture());

    var record = recordCaptor.getValue();
    assertThat(record.topic(), is("topic"));
    assertThat(record.key(), is(avroKey));
    assertThat(record.value(), is(avroValue));
    assertThat(record.headers().toArray().length, is(1));

    var callback = callbackCaptor.getValue();
    assertThat(result.isDone(), is(false));
    var recordMetadata = mock(RecordMetadata.class);
    callback.onCompletion(recordMetadata, null);
    assertThat(result.isDone(), is(false));
}
Example #10
Source File: MergerTest.java From kafka-workers with Apache License 2.0
@Override
public void process(WorkerRecord<String, String> record, RecordStatusObserver observer) {
    logger.info("process(partition: {}, timestamp: {})", record.partition(), record.timestamp());
    Future<RecordMetadata> future = taskProducer.send(new ProducerRecord<>(
            OUTPUT_TOPIC,
            record.partition(),
            record.timestamp(),
            record.key(),
            record.value()));
    try {
        future.get();
    } catch (InterruptedException | ExecutionException e) {
        observer.onFailure(new ProcessingFailureException("could not send " + record, e));
        return; // without this, onSuccess() would also be reported after a failure
    }
    observer.onSuccess();
}
Example #11
Source File: TracingKafkaProducer.java From java-kafka-client with Apache License 2.0
public Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback, SpanContext parent) {
    /*
    // Create wrappedRecord because headers can be read only in record (if record is sent a second time)
    ProducerRecord<K, V> wrappedRecord = new ProducerRecord<>(record.topic(),
        record.partition(),
        record.timestamp(),
        record.key(),
        record.value(),
        record.headers());
    */
    Span span = TracingKafkaUtils.buildAndInjectSpan(record, tracer, producerSpanNameProvider,
            parent, spanDecorators);
    try (Scope ignored = tracer.activateSpan(span)) {
        Callback wrappedCallback = new TracingCallback(callback, span, tracer, spanDecorators);
        return producer.send(record, wrappedCallback);
    }
}
Example #12
Source File: ContextModelController.java From SO with BSD 2-Clause "Simplified" License
private ContextModelForMQ processContextModel(ContextModelForIf2 contextModelForIf, HttpServletRequest request) {
    log.debug("input:ContextModelForIf: {}", contextModelForIf);
    // create a ContextModelForMQ message and publish it to the message queue
    // ContextModelForIf --> ContextModelForMQ
    ContextModelForMQ contextModelForMQ = ContextModelMapper2.toContextModelForMQ(contextModelForIf);

    // tracking
    TrackingEntity trackingEntity = (TrackingEntity) request.getSession().getAttribute("tracking");
    trackingEntity.setSimulatorType(contextModelForIf.getSimulatorType()); // set the simulator type
    contextModelForMQ.setTrackingEntity(trackingEntity);

    contextModelForMQ.addState(Const.CONTEXTMODEL_ID, contextModelForIf.getContextId());
    contextModelForMQ.addState(Const.RESULT_CM_VALUE, contextModelForIf.getResultCmValue());
    log.debug("converted:ContextModelForMQ: {}", contextModelForMQ);

    // object to json
    String contextModelForMqString = ContextModelMapper2.writeJsonString(contextModelForMQ);
    log.debug("generated:ContextModelForMQ {}", contextModelForMqString);

    // context model producer handler
    DefaultProducerHandler producerHandler = new DefaultProducerHandler(0, "contextmodel");
    Future<RecordMetadata> future = producerHandler.send(contextModelForMQ);
    producerHandler.close();
    log.debug("producer.send result: {}", future);
    return contextModelForMQ;
}
Example #13
Source File: KafkaRangerAuthorizerTest.java From ranger with Apache License 2.0
@Test
public void testAuthorizedWrite() throws Exception {
    // Create the Producer
    Properties producerProps = new Properties();
    producerProps.put("bootstrap.servers", "localhost:" + port);
    producerProps.put("acks", "all");
    producerProps.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    producerProps.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    producerProps.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SSL");
    producerProps.put(SslConfigs.SSL_KEYSTORE_TYPE_CONFIG, "JKS");
    producerProps.put(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, serviceKeystorePath);
    producerProps.put(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, "sspass");
    producerProps.put(SslConfigs.SSL_KEY_PASSWORD_CONFIG, "skpass");
    producerProps.put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, truststorePath);
    producerProps.put(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, "security");

    final Producer<String, String> producer = new KafkaProducer<>(producerProps);

    // Send a message
    Future<RecordMetadata> record =
            producer.send(new ProducerRecord<String, String>("dev", "somekey", "somevalue"));
    producer.flush();
    record.get();

    producer.close();
}
Example #14
Source File: OutputKafka.java From ambari-logsearch with Apache License 2.0
public void onCompletion(RecordMetadata metadata, Exception exception) {
    if (metadata != null) {
        if (!output.isKafkaBrokerUp) {
            logger.info("Started writing to kafka. " + output.getShortDescription());
            output.isKafkaBrokerUp = true;
        }
        output.incrementStat(1);
        output.writeBytesMetric.value += message.length();
    } else {
        output.isKafkaBrokerUp = false;
        String logKeyMessage = this.getClass().getSimpleName() + "_KAFKA_ASYNC_ERROR";
        LogFeederUtil.logErrorMessageByInterval(logKeyMessage,
                "Error sending message to Kafka. Async Callback", exception, logger, Level.ERROR);
        output.failedMessages.add(this);
    }
}
Example #15
Source File: CruiseControlMetricsReporter.java From cruise-control with BSD 2-Clause "Simplified" License
/**
 * Send a CruiseControlMetric to the Kafka topic.
 * @param ccm the Cruise Control metric to send.
 */
public void sendCruiseControlMetric(CruiseControlMetric ccm) {
    // Use the topic name as the key when one exists, so that the same sampler collects
    // all the information for a topic.
    String key = ccm.metricClassId() == CruiseControlMetric.MetricClassId.TOPIC_METRIC
            ? ((TopicMetric) ccm).topic()
            : Integer.toString(ccm.brokerId());
    ProducerRecord<String, CruiseControlMetric> producerRecord =
            new ProducerRecord<>(_cruiseControlMetricsTopic, null, ccm.time(), key, ccm);
    LOG.debug("Sending Cruise Control metric {}.", ccm);
    _producer.send(producerRecord, new Callback() {
        @Override
        public void onCompletion(RecordMetadata recordMetadata, Exception e) {
            if (e != null) {
                LOG.warn("Failed to send Cruise Control metric {}", ccm);
                _numMetricSendFailure++;
            }
        }
    });
}
Example #16
Source File: Kafka08DataWriter.java From incubator-gobblin with Apache License 2.0
@Override
public WriteResponse wrap(final RecordMetadata recordMetadata) {
    return new WriteResponse<RecordMetadata>() {
        @Override
        public RecordMetadata getRawResponse() {
            return recordMetadata;
        }

        @Override
        public String getStringResponse() {
            return recordMetadata.toString();
        }

        @Override
        public long bytesWritten() {
            // Don't know how many bytes were written
            return -1;
        }
    };
}
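The bytesWritten() stub above returns -1 because this 0.8-era writer has no size information to report. Newer kafka-clients versions expose serializedKeySize() and serializedValueSize() on RecordMetadata, so a comparable wrapper could report a real count. A sketch of that alternative override, assuming a client version that has those accessors:

@Override
public long bytesWritten() {
    // Each accessor returns -1 when the corresponding key/value was null or not serialized.
    int keySize = recordMetadata.serializedKeySize();
    int valueSize = recordMetadata.serializedValueSize();
    if (keySize < 0 && valueSize < 0) {
        return -1; // nothing measurable was written
    }
    return Math.max(keySize, 0) + Math.max(valueSize, 0);
}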
Example #17
Source File: MetaEventWarningSender.java From DBus with Apache License 2.0
public void sendMaasAppenderMessage(MaasAppenderMessage maasAppenderMessage) {
    ControlMessage message = new ControlMessage(System.currentTimeMillis(),
            ControlType.G_MAAS_APPENDER_EVENT.toString(), "dbus-appender");
    message.setPayload(JSONObject.parseObject(maasAppenderMessage.toString()));

    String topic = PropertiesHolder.getProperties(Constants.Properties.CONFIGURE,
            Constants.ConfigureKey.GLOBAL_EVENT_TOPIC);
    ProducerRecord<String, String> record =
            new ProducerRecord<>(topic, message.getType(), message.toJSONString());
    Future<RecordMetadata> future = producer.send(record, (metadata, exception) -> {
        if (exception != null) {
            logger.error("Send global event error.{}", exception.getMessage());
        }
    });
    try {
        future.get(10000, TimeUnit.MILLISECONDS);
    } catch (Exception e) {
        logger.error(e.getMessage(), e);
    }
}
Example #18
Source File: KafkaPeriodicBindingSetExporter.java From rya with Apache License 2.0
/**
 * Exports BindingSets to Kafka. The BindingSet and topic are extracted from
 * the indicated BindingSetRecord and the BindingSet is then exported to the topic.
 */
@Override
public void exportNotification(final BindingSetRecord record) throws BindingSetRecordExportException {
    try {
        log.info("Exporting {} records to Kafka to topic: {}",
                record.getBindingSet().size(), record.getTopic());
        final String bindingName = IncrementalUpdateConstants.PERIODIC_BIN_ID;

        final BindingSet bindingSet = record.getBindingSet();
        final String topic = record.getTopic();
        final long binId = ((Literal) bindingSet.getValue(bindingName)).longValue();

        final Future<RecordMetadata> future = producer
                .send(new ProducerRecord<String, BindingSet>(topic, Long.toString(binId), bindingSet));
        // wait for confirmation that results have been received
        future.get(5, TimeUnit.SECONDS);
    } catch (final Exception e) {
        // catch all possible exceptional behavior and throw as our checked exception
        throw new BindingSetRecordExportException(e.getMessage(), e);
    }
}
Example #19
Source File: KafkaFactory.java From nakadi with MIT License
@Override
public Future<RecordMetadata> send(final ProducerRecord<String, String> record, final Callback callback) {
    if (kafkaCrutch.brokerIpAddressChanged) {
        throw new KafkaCrutchException("Kafka broker ip address changed, exiting");
    }
    return super.send(record, callback);
}
Example #20
Source File: KafkaMessageProducer.java From tcc-transaction with Apache License 2.0
@Override
public void sendMessage(TransactionMessage message) {
    Future<RecordMetadata> future =
            producer.send(new ProducerRecord<String, TransactionMessage>("galaxy-tx-message", message));
    try {
        RecordMetadata metadata = future.get(timeout, TimeUnit.SECONDS);
        if (logger.isInfoEnabled()) {
            logger.info("Send message: {topic:" + metadata.topic()
                    + ",partition:" + metadata.partition()
                    + ",offset:" + metadata.offset() + "}");
        }
    } catch (Exception e) {
        throw new DistributedTransactionException("Send message error: " + message, e);
    }
}
Example #21
Source File: NewProducer.java From kafka-monitor with Apache License 2.0
@Override
public RecordMetadata send(BaseProducerRecord baseRecord, boolean sync) throws Exception {
    ProducerRecord<String, String> record = new ProducerRecord<>(baseRecord.topic(),
            baseRecord.partition(), baseRecord.key(), baseRecord.value());
    Future<RecordMetadata> future = _producer.send(record);
    return sync ? future.get() : null;
}
Example #22
Source File: NotificationEventListenerKafkaIntegrationTest.java From stream-registry with Apache License 2.0
public KafkaTemplate<SpecificRecord, SpecificRecord> kafkaTemplate() {
    KafkaTemplate<SpecificRecord, SpecificRecord> template = new KafkaTemplate<>(producerFactory());
    template.setProducerListener(new ProducerListener<>() {
        @Override
        public void onSuccess(ProducerRecord<SpecificRecord, SpecificRecord> producerRecord,
                              RecordMetadata recordMetadata) {
            log.info("Produced record {}", producerRecord);
            producedEvents.put((AvroKey) producerRecord.key(), (AvroEvent) producerRecord.value());
            producedHeaders.put((AvroKey) producerRecord.key(), producerRecord.headers());
        }
    });
    return template;
}
Example #23
Source File: DefaultKafkaMessageSender.java From kafka-message-tool with MIT License
private void logSentOffset(RecordMetadata recordMetadata) {
    String offset = "absent in record metadata";
    if (recordMetadata.hasOffset()) {
        offset = String.valueOf(recordMetadata.offset());
    }
    Logger.info(String.format("Record sent: topic='%s', partition=%s, offset=%s",
            recordMetadata.topic(),
            recordMetadata.partition(),
            offset));
}
Example #24
Source File: FlinkKafkaProducer011.java From flink with Apache License 2.0
@Override
public void onCompletion(RecordMetadata metadata, Exception exception) {
    if (exception != null && asyncException == null) {
        asyncException = exception;
    }
    acknowledgeMessage();
}
Example #25
Source File: DeliveryDetails.java From zerocode with Apache License 2.0
@JsonCreator
public DeliveryDetails(String status,
                       String message,
                       Integer recordCount,
                       RecordMetadata recordMetadata) {
    this.status = status;
    this.message = message;
    this.recordCount = recordCount;
    this.recordMetadata = recordMetadata;
}
Example #26
Source File: FlinkKafkaProducer.java From flink with Apache License 2.0
@Override
public void onCompletion(RecordMetadata metadata, Exception exception) {
    if (exception != null && asyncException == null) {
        asyncException = exception;
    }
    acknowledgeMessage();
}
Example #27
Source File: ProduceService.java From kafka-monitor with Apache License 2.0
@Override
public void run() {
    try {
        long nextIndex = currentPartition.get(partition).get();
        // Assemble the message: "time" lets the consumer measure message latency; "index" is
        // the sequence number of this message, used to detect duplicated or lost messages.
        JSONObject messageObj = new JSONObject();
        messageObj.put("topic", MONITOR_TOPIC);
        messageObj.put("time", System.currentTimeMillis());
        messageObj.put("partition", partition);
        messageObj.put("index", nextIndex);
        // String message = String.format("topic:%s,partition:%d,time:%s", MONITOR_TOPIC, partition, System.currentTimeMillis());
        ProduceRecord produceRecord =
                new ProduceRecord(MONITOR_TOPIC, partition, null, messageObj.toJSONString());
        RecordMetadata metadata = producer.send(produceRecord);
        produceMetrics.recordsProduce.record();
        produceMetrics._recordsProducedPerPartition.get(partition).record();
        currentPartition.get(partition).getAndIncrement();
    } catch (Exception e) {
        produceMetrics.errorProduce.record();
        produceMetrics._produceErrorPerPartition.get(partition).record();
        logger.warn("failed to send message ", e);
    }
}
Example #28
Source File: AProducerHandler.java From SO with BSD 2-Clause "Simplified" License
/**
 * Send a record to the producer.<BR/>
 *
 * @param key key
 * @param value value
 */
@Override
public Future<RecordMetadata> send(K key, V value) {
    ProducerRecord<K, V> producerRecord = new ProducerRecord<>(getTopic(), key, value);
    if (producer == null) {
        producer = createProducer();
    }
    return producer.send(producerRecord);
}
Example #29
Source File: KafkaPublisher.java From extension-kafka with Apache License 2.0
private void waitForPublishAck(Future<RecordMetadata> future, MonitorCallback monitorCallback) {
    long deadline = System.currentTimeMillis() + publisherAckTimeout;
    try {
        future.get(Math.max(0, deadline - System.currentTimeMillis()), TimeUnit.MILLISECONDS);
        monitorCallback.reportSuccess();
    } catch (InterruptedException | ExecutionException | TimeoutException e) {
        monitorCallback.reportFailure(e);
        logger.warn("Encountered error while waiting for event publication", e);
        throw new EventPublicationFailedException(
                "Event publication failed, exception occurred while waiting for event publication", e);
    }
}
Example #30
Source File: SimpleProducer.java From kafka-platform-prometheus with Apache License 2.0
private void sendCallback(ProducerRecord<Long, String> record, RecordMetadata recordMetadata, Exception e) {
    if (e == null) {
        logger.debug("succeeded sending. offset: {}", recordMetadata.offset());
    } else {
        logger.error("failed sending key: {}", record.key(), e);
    }
}