org.apache.kafka.common.errors.InterruptException Java Examples
The following examples show how to use org.apache.kafka.common.errors.InterruptException. Each example is taken from an open source project; you can go to the original project or source file by following the links above each example.
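Before the project examples, here is a minimal, self-contained sketch of the most common pattern. InterruptException is the unchecked exception the Kafka client libraries throw when a blocking call such as KafkaConsumer.poll() or KafkaProducer.flush() is interrupted; the client re-sets the thread's interrupt status before throwing it. The broker address, group id, topic name, and loop structure below are illustrative assumptions only and are not taken from any of the projects that follow.

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.InterruptException;

public class InterruptAwareConsumerLoop {

    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // illustrative address
        props.put("group.id", "example-group");           // illustrative group id
        props.put("key.deserializer",
            "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer",
            "org.apache.kafka.common.serialization.StringDeserializer");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singleton("example-topic")); // illustrative topic
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
                for (ConsumerRecord<String, String> record : records) {
                    System.out.println(record.value());
                }
            }
        } catch (InterruptException e) {
            // The Kafka client wraps InterruptedException in this unchecked exception and
            // re-sets the thread's interrupt status, so we simply fall out of the loop and
            // let try-with-resources close the consumer.
        }
    }
}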
Example #1
Source File: ProcessingPartition.java From common-kafka with Apache License 2.0
/**
 * Returns the earliest offset for the partition
 *
 * @return the earliest offset for the partition
 *
 * @throws IllegalStateException if the earliest offset could not be looked up
 */
protected long getEarliestOffset() {
    Map<TopicPartition, Long> offsets;

    try {
        offsets = consumer.beginningOffsets(Collections.singleton(topicPartition));
    } catch (TimeoutException | InterruptException e) {
        throw new IllegalStateException("Unable to look up earliest offset for topic partition [" + topicPartition + "]", e);
    }

    if (!offsets.containsKey(topicPartition))
        throw new IllegalStateException("Unable to look up earliest offset for topic partition [" + topicPartition + "]");

    Long offset = offsets.get(topicPartition);

    if (offset == null)
        throw new IllegalStateException("Unable to look up earliest offset for topic partition [" + topicPartition + "]");

    return offset;
}
Example #2
Source File: ProcessingPartition.java From common-kafka with Apache License 2.0
/**
 * Returns the latest offset for the partition
 *
 * @return the latest offset for the partition
 *
 * @throws IllegalStateException if the latest offset could not be looked up
 */
protected long getLatestOffset() {
    Map<TopicPartition, Long> offsets;

    try {
        offsets = consumer.endOffsets(Collections.singleton(topicPartition));
    } catch (TimeoutException | InterruptException e) {
        throw new IllegalStateException("Unable to look up latest offset for topic partition [" + topicPartition + "]", e);
    }

    if (!offsets.containsKey(topicPartition))
        throw new IllegalStateException("Unable to look up latest offset for topic partition [" + topicPartition + "]");

    Long offset = offsets.get(topicPartition);

    if (offset == null)
        throw new IllegalStateException("Unable to look up latest offset for topic partition [" + topicPartition + "]");

    return offset;
}
Example #3
Source File: KafkaPipeLine.java From bireme with Apache License 2.0
@Override
public ChangeSet pollChangeSet() throws BiremeException {
    ConsumerRecords<String, String> records = null;

    try {
        records = consumer.poll(POLL_TIMEOUT);
    } catch (InterruptException e) {
        // Interrupted poll is ignored here; records stays null and the stop flag is checked below
    }

    if (cxt.stop || records == null || records.isEmpty()) {
        return null;
    }

    KafkaCommitCallback callback = new KafkaCommitCallback();

    if (!commitCallbacks.offer(callback)) {
        String Message = "Can't add CommitCallback to queue.";
        throw new BiremeException(Message);
    }

    stat.recordCount.mark(records.count());
    return packRecords(records, callback);
}
Example #4
Source File: MongodbSourceTask.java From kafka-connect-mongodb with Apache License 2.0
/**
 * Poll this MongodbSourceTask for new records.
 *
 * @return a list of source records
 * @throws InterruptException
 */
@Override
public List<SourceRecord> poll() throws InterruptException {
    List<SourceRecord> records = new ArrayList<>();

    while (!reader.isEmpty()) {
        Document message = reader.pool();
        Struct messageStruct = getStruct(message);
        String topic = getTopic(message);
        String db = getDB(message);
        String timestamp = getTimestamp(message);

        records.add(new SourceRecord(
                Collections.singletonMap("mongodb", db),
                Collections.singletonMap(db, timestamp),
                topic,
                messageStruct.schema(),
                messageStruct));
        log.trace(message.toString());
    }
    return records;
}
Example #5
Source File: TestKafkaMirrorMakerConnectorTask.java From brooklin with BSD 2-Clause "Simplified" License
@Test
public void testPartitionManagedLockReleaseOnInterruptException() throws InterruptedException {
    Datastream datastream = KafkaMirrorMakerConnectorTestUtils.createDatastream("pizzaStream", _broker, "\\w+Pizza");
    DatastreamTaskImpl task = new DatastreamTaskImpl(Collections.singletonList(datastream));
    DatastreamEventProducer mockDatastreamEventProducer = mock(DatastreamEventProducer.class);
    doThrow(InterruptException.class).when(mockDatastreamEventProducer).flush();
    task.setEventProducer(mockDatastreamEventProducer);

    KafkaBasedConnectorConfig connectorConfig = new KafkaBasedConnectorConfigBuilder()
        .setConsumerFactory(new LiKafkaConsumerFactory())
        .setCommitIntervalMillis(10000)
        .setEnablePartitionManaged(true)
        .build();

    ZkAdapter zkAdapter = new ZkAdapter(_kafkaCluster.getZkConnection(), "testCluster", null,
        ZkClient.DEFAULT_SESSION_TIMEOUT, ZkClient.DEFAULT_CONNECTION_TIMEOUT, null);
    task.setZkAdapter(zkAdapter);
    zkAdapter.connect();

    KafkaMirrorMakerConnectorTaskTest connectorTask = new KafkaMirrorMakerConnectorTaskTest(connectorConfig, task, "", false,
        new KafkaMirrorMakerGroupIdConstructor(false, "testCluster"));

    Thread connectorThread = KafkaMirrorMakerConnectorTestUtils.runKafkaMirrorMakerConnectorTask(connectorTask);
    connectorThread.join();
    Assert.assertFalse(connectorTask.isPostShutdownHookExceptionCaught());
}
Example #6
Source File: KafkaSpout.java From storm_spring_boot_demo with MIT License
@Override
public void activate() {
    try {
        subscribeKafkaConsumer();
    } catch (InterruptException e) {
        throwKafkaConsumerInterruptedException();
    }
}
Example #7
Source File: KafkaSpout.java From storm_spring_boot_demo with MIT License
@Override
public void deactivate() {
    try {
        shutdown();
    } catch (InterruptException e) {
        throwKafkaConsumerInterruptedException();
    }
}
Example #8
Source File: KafkaSpout.java From storm_spring_boot_demo with MIT License
@Override
public void close() {
    try {
        shutdown();
    } catch (InterruptException e) {
        throwKafkaConsumerInterruptedException();
    }
}
Example #9
Source File: KafkaProducerWrapper.java From brooklin with BSD 2-Clause "Simplified" License
void flush() {
    synchronized (_producerLock) {
        try {
            if (_kafkaProducer != null) {
                _kafkaProducer.flush(_producerFlushTimeoutMs, TimeUnit.MILLISECONDS);
            }
        } catch (InterruptException e) {
            // The KafkaProducer object should not be reused on an interrupted flush
            _log.warn("Kafka producer flush interrupted, closing producer {}.", _kafkaProducer);
            shutdownProducer();
            throw e;
        }
    }
}
Example #10
Source File: KafkaWriterTest.java From metron with Apache License 2.0
@Test
public void testWriteShouldReturnErrorsOnFailedFlush() throws Exception {
    KafkaWriter writer = spy(new KafkaWriter());
    writer.setKafkaProducer(kafkaProducer);

    List<BulkMessage<JSONObject>> messages = new ArrayList<>();
    JSONObject message1 = new JSONObject();
    message1.put("value", "message1");
    JSONObject message2 = new JSONObject();
    message2.put("value", "message2");
    messages.add(new BulkMessage<>("messageId1", message1));
    messages.add(new BulkMessage<>("messageId2", message2));

    doReturn(Optional.of("topic1")).when(writer).getKafkaTopic(message1);
    doReturn(Optional.of("topic2")).when(writer).getKafkaTopic(message2);

    Future future1 = mock(Future.class);
    Future future2 = mock(Future.class);
    when(kafkaProducer.send(new ProducerRecord<String, String>("topic1", "{\"value\":\"message1\"}"))).thenReturn(future1);
    when(kafkaProducer.send(new ProducerRecord<String, String>("topic2", "{\"value\":\"message2\"}"))).thenReturn(future2);

    InterruptException throwable = new InterruptException("kafka flush exception");
    doThrow(throwable).when(kafkaProducer).flush();

    BulkWriterResponse response = new BulkWriterResponse();
    response.addAllErrors(throwable, Arrays.asList(new MessageId("messageId1"), new MessageId("messageId2")));

    assertEquals(response, writer.write(SENSOR_TYPE, createConfiguration(new HashMap<>()), messages));

    verify(kafkaProducer, times(1)).flush();
    verify(kafkaProducer, times(1)).send(new ProducerRecord<String, String>("topic1", "{\"value\":\"message1\"}"));
    verify(kafkaProducer, times(1)).send(new ProducerRecord<String, String>("topic2", "{\"value\":\"message2\"}"));
    verifyNoMoreInteractions(kafkaProducer);
}
Example #11
Source File: TestKafkaProducerWrapper.java From brooklin with BSD 2-Clause "Simplified" License
@Test
public void testFlushInterrupt() throws Exception {
    DynamicMetricsManager.createInstance(new MetricRegistry(), getClass().getSimpleName());
    Properties transportProviderProperties = new Properties();
    transportProviderProperties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:1234");
    transportProviderProperties.put(ProducerConfig.CLIENT_ID_CONFIG, "testClient");
    transportProviderProperties.put(KafkaTransportProviderAdmin.ZK_CONNECT_STRING_CONFIG, "zk-connect-string");

    String topicName = "random-topic-42";

    MockKafkaProducerWrapper<byte[], byte[]> producerWrapper =
        new MockKafkaProducerWrapper<>("log-suffix", transportProviderProperties, "metrics");

    String destinationUri = "localhost:1234/" + topicName;
    Datastream ds = DatastreamTestUtils.createDatastream("test", "ds1", "source", destinationUri, 1);

    DatastreamTask task = new DatastreamTaskImpl(Collections.singletonList(ds));
    ProducerRecord<byte[], byte[]> producerRecord = new ProducerRecord<>(topicName, null, null);
    producerWrapper.assignTask(task);

    // Sending first event, send should pass, none of the other methods on the producer should have been called
    producerWrapper.send(task, producerRecord, null);
    producerWrapper.verifySend(1);
    producerWrapper.verifyFlush(0);
    producerWrapper.verifyClose(0);
    Assert.assertEquals(producerWrapper.getNumCreateKafkaProducerCalls(), 1);

    // Calling the first flush() on a separate thread because the InterruptException calls Thread interrupt() on the
    // currently running thread. If not run on a separate thread, the test thread itself will be interrupted.
    ExecutorService executorService = Executors.newSingleThreadExecutor();
    executorService.submit(() -> {
        // Flush has been mocked to throw an InterruptException
        Assert.assertThrows(InterruptException.class, producerWrapper::flush);
    }).get();

    producerWrapper.verifySend(1);
    producerWrapper.verifyFlush(1);
    producerWrapper.verifyClose(1);

    // Second send should create a new producer, resetting flush() and close() invocation counts
    producerWrapper.send(task, producerRecord, null);
    producerWrapper.verifySend(1);
    producerWrapper.verifyFlush(0);
    producerWrapper.verifyClose(0);
    Assert.assertEquals(producerWrapper.getNumCreateKafkaProducerCalls(), 2);

    // Second producer's flush() has not been mocked to throw exceptions, this should not throw
    producerWrapper.flush();
    producerWrapper.verifySend(1);
    producerWrapper.verifyFlush(1);
    producerWrapper.verifyClose(0);
    Assert.assertEquals(producerWrapper.getNumCreateKafkaProducerCalls(), 2);

    // Send should reuse the older producer and the counts should not be reset
    producerWrapper.send(task, producerRecord, null);
    producerWrapper.verifySend(2);
    producerWrapper.verifyFlush(1);
    producerWrapper.verifyClose(0);
    Assert.assertEquals(producerWrapper.getNumCreateKafkaProducerCalls(), 2);

    // Closing the producer's task. Since this is the only task, the producer should be closed
    producerWrapper.close(task);
    producerWrapper.verifySend(2);
    producerWrapper.verifyFlush(1);
    producerWrapper.verifyClose(1);
    Assert.assertEquals(producerWrapper.getNumCreateKafkaProducerCalls(), 2);
}
Example #12
Source File: TestKafkaProducerWrapper.java From brooklin with BSD 2-Clause "Simplified" License
MockKafkaProducerWrapper(String logSuffix, Properties props, String metricsNamesPrefix) {
    this(logSuffix, props, metricsNamesPrefix, InterruptException.class);
}