Java Cloud Pub/Sub

56 Java code examples are found related to "cloud pub sub". You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example.
Example 1
Source File: CloudPubSubSinkTaskTest.java    From pubsub with Apache License 2.0 6 votes
/**
 * Tests that no message is sent when a message is completely null.
 */
@Test
public void testPutWithNullMessage() {
  System.out.println("NULL MSG");
  props.put(CloudPubSubSinkConnector.MAX_BUFFER_SIZE_CONFIG, CPS_MIN_BATCH_SIZE1);
  task.start(props);
  List<SinkRecord> records = new ArrayList<>();
  records.add(
      new SinkRecord(
          KAFKA_TOPIC,
          0,
          STRING_SCHEMA,
          null,
          STRING_SCHEMA,
          null,
          -1));
  task.put(records);
  ArgumentCaptor<PubsubMessage> captor = ArgumentCaptor.forClass(PubsubMessage.class);
  verify(publisher, times(0)).publish(captor.capture());
}
 
Example 2
Source File: GoogleCloudPubSubSinkConfiguration.java    From divolte-collector with Apache License 2.0 6 votes
private static void createTopic(final String hostPort,
                                final TransportChannelProvider channelProvider,
                                final ProjectTopicName topic) {
    final TopicAdminClient topicClient;
    try {
        final TopicAdminSettings topicAdminSettings = TopicAdminSettings.newBuilder()
            .setTransportChannelProvider(channelProvider)
            .setCredentialsProvider(NoCredentialsProvider.create())
            .build();
        topicClient = TopicAdminClient.create(topicAdminSettings);
    } catch (final IOException e) {
        throw new UncheckedIOException(String.format("Error creating topic %s for pub/sub emulator %s",
                                                     topic, hostPort), e);
    }
    final ProjectName project = ProjectName.of(topic.getProject());
    if (Streams.stream(topicClient.listTopics(project).iterateAll())
               .map(Topic::getName)
               .map(ProjectTopicName::parse)
               .noneMatch(topic::equals)) {
        logger.info("Initializing Pub/Sub emulator topic: {}", topic);
        topicClient.createTopic(topic);
    }
}
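For context, the channelProvider handed to createTopic() above is constructed elsewhere in the sink configuration. A minimal sketch of how such a provider might be built for an emulator endpoint, assuming the standard gax-grpc classes and a hostPort string such as "localhost:8085" (the actual wiring in divolte-collector may differ):

import com.google.api.gax.grpc.GrpcTransportChannel;
import com.google.api.gax.rpc.FixedTransportChannelProvider;
import com.google.api.gax.rpc.TransportChannelProvider;
import io.grpc.ManagedChannel;
import io.grpc.ManagedChannelBuilder;

// Plain-text gRPC channel to the emulator; no TLS or credentials are required,
// which is why createTopic() above pairs it with NoCredentialsProvider.
final ManagedChannel channel =
    ManagedChannelBuilder.forTarget(hostPort).usePlaintext().build();
final TransportChannelProvider channelProvider =
    FixedTransportChannelProvider.create(GrpcTransportChannel.create(channel));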
 
Example 3
Source File: CloudPubSubSinkTask.java    From pubsub with Apache License 2.0 6 votes
@Override
public void stop() {
  log.info("Stopping CloudPubSubSinkTask");

  if (publisher != null) {
    log.info("Shutting down PubSub publisher");
    try {
      publisher.shutdown();
      boolean terminated = publisher.awaitTermination(maxShutdownTimeoutMs, TimeUnit.MILLISECONDS);
      if (!terminated) {
        log.warn(String.format("PubSub publisher did not terminate cleanly in %d ms", maxShutdownTimeoutMs));
      }
    } catch (Exception e) {
      // There is not much we can do here besides logging it as an error
      log.error("An exception occurred while shutting down PubSub publisher", e);
    }
  }
}
 
Example 4
Source File: GoogleCloudPubSubSinkConfiguration.java    From divolte-collector with Apache License 2.0 6 votes
private SinkFactory createFlushingPool(final RetrySettings retrySettings,
                                       final BatchingSettings batchingSettings) {
    return (vc, sinkName, registry) -> {
        final String projectId = vc.configuration().global.gcps.projectId.orElseThrow(IllegalStateException::new);
        final ProjectTopicName topicName = ProjectTopicName.of(projectId, topic);
        final Publisher.Builder builder =
            Publisher.newBuilder(topicName)
                     .setRetrySettings(retrySettings)
                     .setBatchingSettings(batchingSettings);
        final Publisher publisher = IOExceptions.wrap(builder::build).get();
        return new GoogleCloudPubSubFlushingPool(sinkName,
                                                 vc.configuration().global.gcps.threads,
                                                 vc.configuration().global.gcps.bufferSize,
                                                 publisher,
                                                 Optional.empty(),
                                                 registry.getSchemaBySinkName(sinkName));
    };
}
 
Example 5
Source File: CloudPubSubSourceTaskTest.java    From pubsub with Apache License 2.0 6 votes
/**
 * Tests that when a call to ackMessages() fails, the message is not redelivered to Kafka if
 * the message is received again from Cloud Pub/Sub. Also tests that ack ids are added properly
 * if an ack id has not been seen before.
 */
@Test
public void testPollWithDuplicateReceivedMessages() throws Exception {
  task.start(props);
  ReceivedMessage rm1 = createReceivedMessage(ACK_ID1, CPS_MESSAGE, new HashMap<String, String>());
  PullResponse stubbedPullResponse = PullResponse.newBuilder().addReceivedMessages(rm1).build();
  when(subscriber.pull(any(PullRequest.class)).get()).thenReturn(stubbedPullResponse);
  List<SourceRecord> result = task.poll();
  assertEquals(1, result.size());
  ReceivedMessage rm2 = createReceivedMessage(ACK_ID2, CPS_MESSAGE, new HashMap<String, String>());
  stubbedPullResponse =
      PullResponse.newBuilder().addReceivedMessages(0, rm1).addReceivedMessages(1, rm2).build();
  when(subscriber.pull(any(PullRequest.class)).get()).thenReturn(stubbedPullResponse);
  result = task.poll();
  assertEquals(1, result.size());
}
 
Example 6
Source File: CloudPubSubGRPCSubscriber.java    From pubsub with Apache License 2.0 6 votes
private void makeSubscriber() {
  try {
    log.info("Creating subscriber.");
    SubscriberStubSettings subscriberStubSettings =
        SubscriberStubSettings.newBuilder()
            .setTransportChannelProvider(
                SubscriberStubSettings.defaultGrpcTransportProviderBuilder()
                    .setMaxInboundMessageSize(20 << 20) // 20 MB
                    .build())
            .setCredentialsProvider(gcpCredentialsProvider)
            .build();
    subscriber = GrpcSubscriberStub.create(subscriberStubSettings);
    // We change the subscriber every 25 - 35 minutes in order to avoid GOAWAY errors.
    nextSubscriberResetTime =
        System.currentTimeMillis() + rand.nextInt(10 * 60 * 1000) + 25 * 60 * 1000;
  } catch (IOException e) {
    throw new RuntimeException("Could not create subscriber stub; no subscribing can occur.", e);
  }
}
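The reset deadline computed above only matters if something checks it. A hedged sketch of how a pull path might consult it (hypothetical; the connector's actual pull logic may differ), reusing the subscriber and nextSubscriberResetTime fields from this class:

import com.google.api.core.ApiFuture;
import com.google.pubsub.v1.PullRequest;
import com.google.pubsub.v1.PullResponse;

// Hypothetical sketch: rebuild the stub once the randomized 25-35 minute window elapses.
public ApiFuture<PullResponse> pull(PullRequest request) {
  if (System.currentTimeMillis() >= nextSubscriberResetTime) {
    subscriber.close(); // release the old stub's channel before creating a new one
    makeSubscriber();
  }
  return subscriber.pullCallable().futureCall(request);
}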
 
Example 7
Source File: CloudPubSubSinkTask.java    From pubsub with Apache License 2.0 6 votes
private Iterable<? extends Header> getRecordHeaders(SinkRecord record) {
  ConnectHeaders headers = new ConnectHeaders();
  if (record.headers() != null) {
    int headerCount = 0;
    for (Header header : record.headers()) {
      // Cloud Pub/Sub limits attribute keys to 256 bytes and values to 1024 bytes;
      // oversized headers are silently dropped.
      if (header.key().getBytes().length < 257 &&
          String.valueOf(header.value()).getBytes().length < 1025) {
        headers.add(header);
        headerCount++;
      }
      // Pub/Sub allows at most 100 attributes per message, so stop once 100 have been added.
      if (headerCount >= 100) {
        break;
      }
    }
  }
  return headers;
}
 
Example 8
Source File: CloudPubSubSourceTaskTest.java    From pubsub with Apache License 2.0 6 votes
/**
 * Tests when the message(s) retrieved from Cloud Pub/Sub do have an attribute that matches {@link
 * #KAFKA_MESSAGE_KEY_ATTRIBUTE}.
 */
@Test
public void testPollWithMessageKeyAttribute() throws Exception {
  task.start(props);
  Map<String, String> attributes = new HashMap<>();
  attributes.put(KAFKA_MESSAGE_KEY_ATTRIBUTE, KAFKA_MESSAGE_KEY_ATTRIBUTE_VALUE);
  ReceivedMessage rm = createReceivedMessage(ACK_ID1, CPS_MESSAGE, attributes);
  PullResponse stubbedPullResponse = PullResponse.newBuilder().addReceivedMessages(rm).build();
  when(subscriber.pull(any(PullRequest.class)).get()).thenReturn(stubbedPullResponse);
  List<SourceRecord> result = task.poll();
  verify(subscriber, never()).ackMessages(any(AcknowledgeRequest.class));
  assertEquals(1, result.size());
  SourceRecord expected =
      new SourceRecord(
          null,
          null,
          KAFKA_TOPIC,
          0,
          Schema.OPTIONAL_STRING_SCHEMA,
          KAFKA_MESSAGE_KEY_ATTRIBUTE_VALUE,
          Schema.BYTES_SCHEMA,
          KAFKA_VALUE);
  assertRecordsEqual(expected, result.get(0));
}
 
Example 9
Source File: CloudPubSubSourceTaskTest.java    From pubsub with Apache License 2.0 6 votes
/**
 * Tests when the message(s) retrieved from Cloud Pub/Sub do not have an attribute that matches
 * {@link #KAFKA_MESSAGE_KEY_ATTRIBUTE}.
 */
@Test
public void testPollWithNoMessageKeyAttribute() throws Exception {
  task.start(props);
  ReceivedMessage rm = createReceivedMessage(ACK_ID1, CPS_MESSAGE, new HashMap<String, String>());
  PullResponse stubbedPullResponse = PullResponse.newBuilder().addReceivedMessages(rm).build();
  when(subscriber.pull(any(PullRequest.class)).get()).thenReturn(stubbedPullResponse);
  List<SourceRecord> result = task.poll();
  verify(subscriber, never()).ackMessages(any(AcknowledgeRequest.class));
  assertEquals(1, result.size());
  SourceRecord expected =
      new SourceRecord(
          null,
          null,
          KAFKA_TOPIC,
          0,
          Schema.OPTIONAL_STRING_SCHEMA,
          null,
          Schema.BYTES_SCHEMA,
          KAFKA_VALUE);
  assertRecordsEqual(expected, result.get(0));
}
 
Example 10
Source File: CloudPubSubSourceTaskTest.java    From pubsub with Apache License 2.0 6 votes
/**
 * Compare two SourceRecords. This is necessary because the records' values contain a byte[] and
 * the .equals on a SourceRecord does not take this into account.
 */
public void assertRecordsEqual(SourceRecord sr1, SourceRecord sr2) {
  assertEquals(sr1.key(), sr2.key());
  assertEquals(sr1.keySchema(), sr2.keySchema());
  assertEquals(sr1.valueSchema(), sr2.valueSchema());
  assertEquals(sr1.topic(), sr2.topic());

  if (sr1.valueSchema() == Schema.BYTES_SCHEMA) {
    assertArrayEquals((byte[])sr1.value(), (byte[])sr2.value());
  } else {
    for(Field f : sr1.valueSchema().fields()) {
      if (f.name().equals(ConnectorUtils.KAFKA_MESSAGE_CPS_BODY_FIELD)) {
        assertArrayEquals(((Struct)sr1.value()).getBytes(f.name()),
                          ((Struct)sr2.value()).getBytes(f.name()));
      } else {
        assertEquals(((Struct)sr1.value()).getString(f.name()),
                     ((Struct)sr2.value()).getString(f.name()));
      }
    }
  }
}
 
Example 11
Source File: GoogleCloudPubSubFlusherTest.java    From divolte-collector with Apache License 2.0 6 votes
@Test
public void testMessagesHaveSchemaFingerprint() {
    processSingleMessage();
    // Reminder: fingerprint is the SHA-256 hash of the normalized schema,
    //           base-64 encoded using the URL-safe encoding,
    //           with trailing padding stripped.
    final String expectedFingerPrint =
        BaseEncoding.base64Url()
                    .encode(Hashing.sha256()
                                   .hashString(SchemaNormalization.toParsingForm(MINIMAL_SCHEMA),
                                               StandardCharsets.UTF_8)
                                   .asBytes())
                    .replace("=", "");
    final PubsubMessage deliveredMessage = getFirstPublishedMessage();
    assertEquals(expectedFingerPrint, deliveredMessage.getAttributesOrThrow("schemaFingerprint"));
}
 
Example 12
Source File: CloudPubSubSinkTaskTest.java    From pubsub with Apache License 2.0 6 votes
@Test(expected = DataException.class)
public void testStructSchemaWithNestedSchema() {
  task.start(props);

  Schema nestedSchema = SchemaBuilder.struct().build();
  Struct nestedVal = new Struct(nestedSchema);

  Schema schema = SchemaBuilder.struct().field(FIELD_STRING1, SchemaBuilder.string())
      .field(FIELD_STRING2, nestedSchema).build();
  Struct val = new Struct(schema);
  val.put(FIELD_STRING1, "tide");
  val.put(FIELD_STRING2, nestedVal);
  SinkRecord record = new SinkRecord(null, -1, null, null, schema, val, -1);
  List<SinkRecord> list = new ArrayList<>();
  list.add(record);
  task.put(list);
}
 
Example 13
Source File: GoogleCloudPubSubFlusherTest.java    From divolte-collector with Apache License 2.0 6 votes
@Test
public void testMessageBatchSentToPublisher() {
    final Publisher publisher = mockPublisher.orElseThrow(IllegalStateException::new);

    // Process a bunch of messages.
    final DivolteSchema schema = new DivolteSchema(MINIMAL_SCHEMA, Optional.empty());
    final GoogleCloudPubSubFlusher flusher = new GoogleCloudPubSubFlusher(publisher, schema);
    final Queue<Item<AvroRecordBuffer>> items =
        Stream.generate(this::generateMessage)
              .limit(10)
              .map(this::itemFromAvroRecordBuffer)
              .collect(Collectors.toCollection(() -> new ArrayBlockingQueue<>(10)));
    flusher.process(items);

    // Check the messages were all forwarded to the publisher.
    verify(publisher, times(10)).publish(any(PubsubMessage.class));
    verifyNoMoreInteractions(publisher);
}
 
Example 14
Source File: GoogleCloudPubSubSinkConfigurationTest.java    From divolte-collector with Apache License 2.0 5 votes
@Test
public void testDefaultBatchingConfigurationValid() {
    // Check that we can generate settings from our defaults.
    final BatchingSettings batchingSettings =
        GoogleCloudPubSubSinkConfiguration.DEFAULT_BATCHING_SETTINGS.createBatchingSettings();
    assertNotNull(batchingSettings);
}
 
Example 15
Source File: GoogleCloudPubSubFlusher.java    From divolte-collector with Apache License 2.0 5 votes
@Override
protected PubsubMessage buildRecord(final AvroRecordBuffer record) {
    final PubsubMessage.Builder builder = PubsubMessage.newBuilder()
        .putAttributes(MESSAGE_ATTRIBUTE_SCHEMA_FINGERPRINT, schemaFingerprint)
        .putAttributes(MESSAGE_ATTRIBUTE_PARTYID, record.getPartyId().toString())
        .putAttributes(MESSAGE_ATTRIBUTE_EVENTID, record.getEventId())
        .putAttributes(MESSAGE_ATTRIBUTE_TIMESTAMP, DateTimeFormatter.ISO_INSTANT.format(record.getTimestamp()))
        .setData(ByteString.copyFrom(record.getByteBuffer()));
    return schemaConfluentId
        .map(id -> builder.putAttributes(MESSAGE_ATTRIBUTE_SCHEMA_CONFLUENT_ID, id))
        .orElse(builder)
        .build();
}
 
Example 16
Source File: CloudPubSubSourceTaskTest.java    From pubsub with Apache License 2.0 5 votes
/**
 * Tests when the message retrieved from Cloud Pub/Sub has several attributes, including one
 * that matches {@link #KAFKA_MESSAGE_KEY_ATTRIBUTE}, and uses Kafka record headers to store them.
 */
@Test
public void testPollWithMultipleAttributesAndRecordHeaders() throws Exception {
  props.put(CloudPubSubSourceConnector.USE_KAFKA_HEADERS, "true");
  task.start(props);
  Map<String, String> attributes = new HashMap<>();
  attributes.put(KAFKA_MESSAGE_KEY_ATTRIBUTE, KAFKA_MESSAGE_KEY_ATTRIBUTE_VALUE);
  attributes.put("attribute1", "attribute_value1");
  attributes.put("attribute2", "attribute_value2");
  ReceivedMessage rm = createReceivedMessage(ACK_ID1, CPS_MESSAGE, attributes);
  PullResponse stubbedPullResponse = PullResponse.newBuilder().addReceivedMessages(rm).build();
  when(subscriber.pull(any(PullRequest.class)).get()).thenReturn(stubbedPullResponse);
  List<SourceRecord> result = task.poll();
  verify(subscriber, never()).ackMessages(any(AcknowledgeRequest.class));
  assertEquals(1, result.size());

  ConnectHeaders headers = new ConnectHeaders();
  headers.addString("attribute1", "attribute_value1");
  headers.addString("attribute2", "attribute_value2");

  SourceRecord expected =
      new SourceRecord(
          null,
          null,
          KAFKA_TOPIC,
          0,
          Schema.OPTIONAL_STRING_SCHEMA,
          KAFKA_MESSAGE_KEY_ATTRIBUTE_VALUE,
          Schema.BYTES_SCHEMA,
          KAFKA_VALUE,
          Long.parseLong(KAFKA_MESSAGE_TIMESTAMP_ATTRIBUTE_VALUE),
          headers);
  assertRecordsEqual(expected, result.get(0));
}
 
Example 17
Source File: CloudPubSubSinkTaskTest.java    From pubsub with Apache License 2.0 5 votes
@Test(expected = DataException.class)
public void testStructSchemaWithMissingField() {
  task.start(props);

  Schema schema = SchemaBuilder.struct().field(FIELD_STRING1, SchemaBuilder.string())
      .field(FIELD_STRING2, SchemaBuilder.string()).build();
  Struct val = new Struct(schema);
  val.put(FIELD_STRING1, "tide");
  SinkRecord record = new SinkRecord(null, -1, null, null, schema, val, -1);
  List<SinkRecord> list = new ArrayList<>();
  list.add(record);
  task.put(list);
}
 
Example 18
Source File: GoogleCloudPubSubFlusherTest.java    From divolte-collector with Apache License 2.0 5 votes
private void processSingleMessage(final Optional<Integer> confluentId) {
    final Publisher publisher = mockPublisher.orElseThrow(IllegalStateException::new);

    // Process a single message.
    final DivolteSchema schema = new DivolteSchema(MINIMAL_SCHEMA, confluentId);
    final GoogleCloudPubSubFlusher flusher = new GoogleCloudPubSubFlusher(publisher, schema);
    if (ItemProcessor.ProcessingDirective.PAUSE == flusher.process(itemFromAvroRecordBuffer(generateMessage()))) {
        flusher.heartbeat();
    }
}
 
Example 19
Source File: CloudPubSubSourceTask.java    From pubsub with Apache License 2.0 5 votes
@Override
public void commitRecord(SourceRecord record) {
  String ackId = record.sourceOffset().get(cpsSubscription).toString();
  deliveredAckIds.add(ackId);
  ackIds.remove(ackId);
  log.trace("Committed {}", ackId);
}
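The ack ids queued here are flushed by ackMessages(), which poll() (Example 39) and commit() (Example 52) invoke but which is not shown on this page. A deliberately simplified, hedged sketch of such a flush, assuming the task's deliveredAckIds, cpsSubscription, and subscriber fields; the real implementation also tracks in-flight ack ids and handles failures:

import com.google.pubsub.v1.AcknowledgeRequest;

// Hypothetical sketch: acknowledge everything delivered to Kafka since the last flush.
private void ackMessages() {
  if (!deliveredAckIds.isEmpty()) {
    AcknowledgeRequest request =
        AcknowledgeRequest.newBuilder()
            .setSubscription(cpsSubscription)
            .addAllAckIds(deliveredAckIds)
            .build();
    subscriber.ackMessages(request);
    deliveredAckIds.clear();
  }
}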
 
Example 20
Source File: GoogleCloudPubSubConfiguration.java    From divolte-collector with Apache License 2.0 5 votes
@JsonCreator
GoogleCloudPubSubConfiguration(final int bufferSize,
                               final int threads,
                               final boolean enabled,
                               @Nullable final String projectId) {
    super(bufferSize, threads, enabled);
    this.projectId = null != projectId ? Optional.of(projectId) : getDefaultProjectId();
}
 
Example 21
Source File: GoogleCloudPubSubFlusherTest.java    From divolte-collector with Apache License 2.0 5 votes
@Test
public void testMessagesHaveEventIdAttribute() {
    processSingleMessage();
    final PubsubMessage deliveredMessage = getFirstPublishedMessage();
    assertEquals(sessionId.orElseThrow(IllegalStateException::new).toString() + "-0",
                 deliveredMessage.getAttributesOrThrow("eventIdentifier"));
}
 
Example 22
Source File: GoogleCloudPubSubFlusherTest.java    From divolte-collector with Apache License 2.0 5 votes
@Test
public void testMessageBodyIsNakedAvroRecord() throws IOException {
    processSingleMessage();
    final PubsubMessage deliveredMessage = getFirstPublishedMessage();
    final ByteString body = deliveredMessage.getData();

    final DatumReader<GenericRecord> reader = new GenericDatumReader<>(MINIMAL_SCHEMA);
    final Decoder decoder = DecoderFactory.get().binaryDecoder(body.newInput(), null);
    final GenericRecord record = reader.read(null, decoder);
    assertEquals(partyId.orElseThrow(IllegalStateException::new).toString(), record.get("partyId").toString());
    assertEquals(sessionId.orElseThrow(IllegalStateException::new).toString(), record.get("sessionId").toString());
    assertEquals(0L, record.get("counter"));
}
 
Example 23
Source File: GoogleCloudPubSubFlushingPool.java    From divolte-collector with Apache License 2.0 5 votes
public GoogleCloudPubSubFlushingPool(final String name,
                                     final int numThreads,
                                     final int maxWriteQueue,
                                     final Publisher publisher,
                                     final Optional<ManagedChannel> channel,
                                     final DivolteSchema schema) {
    super(numThreads,
          maxWriteQueue,
          String.format("Google Cloud Pub/Sub Flusher [%s]", Objects.requireNonNull(name)),
          () -> new GoogleCloudPubSubFlusher(publisher, schema));
    this.publisher = Objects.requireNonNull(publisher);
    this.channel = Objects.requireNonNull(channel);
}
 
Example 24
Source File: GoogleCloudPubSubSinkConfiguration.java    From divolte-collector with Apache License 2.0 5 votes
@JsonCreator
@ParametersAreNullableByDefault
GoogleCloudPubSubSinkConfiguration(@JsonProperty(defaultValue=DEFAULT_TOPIC) final String topic,
                                   final GooglePubSubRetryConfiguration retrySettings,
                                   final GoogleBatchingConfiguration batchingSettings) {
    super(topic);
    this.retrySettings = Optional.ofNullable(retrySettings).orElse(DEFAULT_RETRY_SETTINGS);
    this.batchingSettings = Optional.ofNullable(batchingSettings).orElse(DEFAULT_BATCHING_SETTINGS);
}
 
Example 25
Source File: GoogleCloudPubSubFlusherTest.java    From divolte-collector with Apache License 2.0 5 votes
private AvroRecordBuffer generateMessage() {
    final DivolteIdentifier partyId = this.partyId.orElseThrow(IllegalStateException::new);
    final DivolteIdentifier sessionId = this.sessionId.orElseThrow(IllegalStateException::new);
    final String eventId = sessionId.toString() + '-' + Long.toHexString(generatedEventCounter);
    final GenericRecord record = new GenericRecordBuilder(MINIMAL_SCHEMA)
        .set("partyId", partyId.toString())
        .set("sessionId", sessionId.toString())
        .set("counter", generatedEventCounter++)
        .build();
    return AvroRecordBuffer.fromRecord(partyId, sessionId, eventId, EVENT_TIMESTAMP, record);
}
 
Example 26
Source File: GoogleCloudPubSubFlusher.java    From divolte-collector with Apache License 2.0 5 votes
private static String schemaFingerprint(final DivolteSchema schema) {
    final Schema avroSchema = schema.avroSchema;
    final byte[] fingerprint;
    // SHA-256 is on the list of mandatory JCE algorithms, so this shouldn't be an issue.
    try {
        fingerprint = SchemaNormalization.parsingFingerprint("SHA-256", avroSchema);
    } catch (final NoSuchAlgorithmException e) {
        throw new RuntimeException("Cannot calculate schema fingerprint; missing SHA-256 digest algorithm", e);
    }
    return FINGERPRINT_ENCODER.encodeToString(fingerprint);
}
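FINGERPRINT_ENCODER itself is not shown on this page, but the test in Example 11 pins down its behavior: URL-safe base64 with trailing padding stripped. A matching definition using java.util.Base64 (an assumption; the actual field in GoogleCloudPubSubFlusher may differ):

import java.util.Base64;

// Assumed encoder, consistent with the fingerprint test in Example 11:
// URL-safe base64 without '=' padding.
private static final Base64.Encoder FINGERPRINT_ENCODER =
    Base64.getUrlEncoder().withoutPadding();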
 
Example 27
Source File: GoogleCloudPubSubConfiguration.java    From divolte-collector with Apache License 2.0 5 votes
private static Optional<String> getDefaultProjectId() {
    final Optional<String> projectId = Optional.ofNullable(ServiceOptions.getDefaultProjectId());
    if (projectId.isPresent()) {
        logger.info("Discovered default Google Cloud project: {}", projectId.get());
    } else {
        logger.debug("No default Google Cloud project available.");
    }
    return projectId;
}
 
Example 28
Source File: CloudPubSubSinkTaskTest.java    From pubsub with Apache License 2.0 5 votes
/**
 * Get some PubsubMessages which correspond to the SinkRecords created in {@link
 * #getSampleRecords()}.
 */
private List<PubsubMessage> getPubsubMessagesFromSampleRecords() {
  List<PubsubMessage> messages = new ArrayList<>();
  Map<String, String> attributes = new HashMap<>();
  attributes.put(ConnectorUtils.CPS_MESSAGE_KEY_ATTRIBUTE, KAFKA_MESSAGE_KEY);
  messages.add(
      PubsubMessage.newBuilder().putAllAttributes(attributes).setData(KAFKA_MESSAGE1).build());
  messages.add(
      PubsubMessage.newBuilder().putAllAttributes(attributes).setData(KAFKA_MESSAGE2).build());
  return messages;
}
 
Example 29
Source File: CloudPubSubSinkTaskTest.java    From pubsub with Apache License 2.0 5 votes
@Test
public void testPublisherShutdownOnStop() throws Exception {
  int maxShutdownTimeoutMs = 20000;
  props.put(CloudPubSubSinkConnector.MAX_SHUTDOWN_TIMEOUT_MS, Integer.toString(maxShutdownTimeoutMs));

  task.start(props);
  task.stop();

  verify(publisher, times(1)).shutdown();
  verify(publisher, times(1)).awaitTermination(maxShutdownTimeoutMs, TimeUnit.MILLISECONDS);
}
 
Example 30
Source File: CloudPubSubSourceTask.java    From pubsub with Apache License 2.0 5 votes
private Long getLongValue(String timestamp) {
  if (timestamp == null) {
    return null;
  }
  try {
    return Long.valueOf(timestamp);
  } catch (NumberFormatException e) {
    log.error("Error while converting `{}` to number", timestamp, e);
  }
  return null;
}
 
Example 31
Source File: CloudPubSubSinkTaskTest.java    From pubsub with Apache License 2.0 5 votes
/**
 * Tests that the correct message is sent to the publisher.
 */
@Test
public void testPutWherePublishesAreInvoked() {
  props.put(CloudPubSubSinkConnector.MAX_BUFFER_SIZE_CONFIG, CPS_MIN_BATCH_SIZE1);
  task.start(props);
  List<SinkRecord> records = getSampleRecords();
  task.put(records);
  ArgumentCaptor<PubsubMessage> captor = ArgumentCaptor.forClass(PubsubMessage.class);
  verify(publisher, times(2)).publish(captor.capture());
  List<PubsubMessage> requestArgs = captor.getAllValues();
  assertEquals(requestArgs, getPubsubMessagesFromSampleRecords());
}
 
Example 32
Source File: CloudPubSubSinkTaskTest.java    From pubsub with Apache License 2.0 5 votes
@Override
public V get() throws ExecutionException {
  if (exception != null) {
    throw new ExecutionException(exception);
  }
  return value;
}
 
Example 33
Source File: CloudPubSubSourceTask.java    From pubsub with Apache License 2.0 5 votes
/** Return the partition a message should go to based on {@link #kafkaPartitionScheme}. */
private Integer selectPartition(Object key, Object value) {
  if (kafkaPartitionScheme.equals(PartitionScheme.HASH_KEY)) {
    return key == null ? 0 : Math.abs(key.hashCode()) % kafkaPartitions;
  } else if (kafkaPartitionScheme.equals(PartitionScheme.HASH_VALUE)) {
    return Math.abs(value.hashCode()) % kafkaPartitions;
  } else if (kafkaPartitionScheme.equals(PartitionScheme.KAFKA_PARTITIONER)) {
    return null;
  } else {
    currentRoundRobinPartition = ++currentRoundRobinPartition % kafkaPartitions;
    return currentRoundRobinPartition;
  }
}
 
Example 34
Source File: CloudPubSubSourceTaskTest.java    From pubsub with Apache License 2.0 5 votes
/**
 * Tests when the message retrieved from Cloud Pub/Sub has several attributes, including
 * one that matches {@link #KAFKA_MESSAGE_KEY_ATTRIBUTE}.
 */
@Test
public void testPollWithMultipleAttributes() throws Exception {
  task.start(props);
  Map<String, String> attributes = new HashMap<>();
  attributes.put(KAFKA_MESSAGE_KEY_ATTRIBUTE, KAFKA_MESSAGE_KEY_ATTRIBUTE_VALUE);
  attributes.put("attribute1", "attribute_value1");
  attributes.put("attribute2", "attribute_value2");
  ReceivedMessage rm = createReceivedMessage(ACK_ID1, CPS_MESSAGE, attributes);
  PullResponse stubbedPullResponse = PullResponse.newBuilder().addReceivedMessages(rm).build();
  when(subscriber.pull(any(PullRequest.class)).get()).thenReturn(stubbedPullResponse);
  List<SourceRecord> result = task.poll();
  verify(subscriber, never()).ackMessages(any(AcknowledgeRequest.class));
  assertEquals(1, result.size());
  Schema expectedSchema =
      SchemaBuilder.struct()
          .field(ConnectorUtils.KAFKA_MESSAGE_CPS_BODY_FIELD, Schema.BYTES_SCHEMA)
          .field("attribute1", Schema.STRING_SCHEMA)
          .field("attribute2", Schema.STRING_SCHEMA)
          .build();
  Struct expectedValue = new Struct(expectedSchema)
                             .put(ConnectorUtils.KAFKA_MESSAGE_CPS_BODY_FIELD, KAFKA_VALUE)
                             .put("attribute1", "attribute_value1")
                             .put("attribute2", "attribute_value2");
  SourceRecord expected =
      new SourceRecord(
          null,
          null,
          KAFKA_TOPIC,
          0,
          Schema.OPTIONAL_STRING_SCHEMA,
          KAFKA_MESSAGE_KEY_ATTRIBUTE_VALUE,
          expectedSchema,
          expectedValue);
  assertRecordsEqual(expected, result.get(0));
}
 
Example 35
Source File: CloudPubSubSinkTaskTest.java    From pubsub with Apache License 2.0 5 votes
@Test
public void testStructSchema() {
  task.start(props);
  Schema schema = SchemaBuilder.struct().field(FIELD_STRING1, SchemaBuilder.string())
      .field(FIELD_STRING2, SchemaBuilder.string()).build();
  Struct val = new Struct(schema);
  val.put(FIELD_STRING1, "tide");
  val.put(FIELD_STRING2, "eagle");
  SinkRecord record = new SinkRecord(null, -1, null, null, schema, val, -1);
  List<SinkRecord> list = new ArrayList<>();
  list.add(record);
  task.put(list);
}
 
Example 36
Source File: CloudPubSubSinkTaskTest.java    From pubsub with Apache License 2.0 5 votes
/** Tests that put() succeeds for records whose value schema is a primitive type rather than BYTES. */
@Test
public void testPutPrimitives() {
  task.start(props);
  SinkRecord record8 =
      new SinkRecord(null, -1, null, null, SchemaBuilder.int8(), (byte) 5, -1);
  SinkRecord record16 =
      new SinkRecord(null, -1, null, null, SchemaBuilder.int16(), (short) 5, -1);
  SinkRecord record32 =
      new SinkRecord(null, -1, null, null, SchemaBuilder.int32(), (int) 5, -1);
  SinkRecord record64 =
      new SinkRecord(null, -1, null, null, SchemaBuilder.int64(), (long) 5, -1);
  SinkRecord recordFloat32 =
      new SinkRecord(null, -1, null, null, SchemaBuilder.float32(), (float) 8, -1);
  SinkRecord recordFloat64 =
      new SinkRecord(null, -1, null, null, SchemaBuilder.float64(), (double) 8, -1);
  SinkRecord recordBool =
      new SinkRecord(null, -1, null, null, SchemaBuilder.bool(), true, -1);
  SinkRecord recordString =
      new SinkRecord(null, -1, null, null, SchemaBuilder.string(), "Test put.", -1);
  List<SinkRecord> list = new ArrayList<>();
  list.add(record8);
  list.add(record16);
  list.add(record32);
  list.add(record64);
  list.add(recordFloat32);
  list.add(recordFloat64);
  list.add(recordBool);
  list.add(recordString);
  task.put(list);
}
 
Example 37
Source File: CloudPubSubSourceConnector.java    From pubsub with Apache License 2.0 5 votes
public static PartitionScheme getEnum(String value) {
  if (value.equals("round_robin")) {
    return PartitionScheme.ROUND_ROBIN;
  } else if (value.equals("hash_key")) {
    return PartitionScheme.HASH_KEY;
  } else if (value.equals("hash_value")) {
    return PartitionScheme.HASH_VALUE;
  } else if (value.equals("kafka_partitioner")) {
    return PartitionScheme.KAFKA_PARTITIONER;
  } else {
    return null;
  }
}
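Because getEnum() returns null for an unrecognized value, callers are expected to validate the result. A hedged usage sketch (the "kafka.partition.scheme" key is illustrative, not necessarily the connector's actual property name):

import org.apache.kafka.common.config.ConfigException;

// Hypothetical validation at task-configuration time.
String raw = props.get("kafka.partition.scheme");
PartitionScheme scheme = PartitionScheme.getEnum(raw);
if (scheme == null) {
  throw new ConfigException("Invalid partition scheme: " + raw);
}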
 
Example 38
Source File: GoogleCloudPubSubFlusher.java    From divolte-collector with Apache License 2.0 4 votes
@Override
protected ImmutableList<PubsubMessage> sendBatch(final List<PubsubMessage> batch) throws InterruptedException {
    // For Pub/Sub we assume the following:
    //  - Batching behaviour is set to flush everything ASAP.
    //  - Retry behaviour will retry indefinitely, so long as it seems likely to succeed.

    // First start sending the messages.
    // (This will serialize them, determine the partition and then assign them to a per-partition buffer.)
    final int batchSize = batch.size();
    final List<ApiFuture<String>> sendResults =
        batch.stream()
            .map(publisher::publish)
            .collect(Collectors.toCollection(() -> new ArrayList<>(batchSize)));

    // At this point the messages are in flight, and we assume being flushed.
    // When they eventually complete, each message can be in one of several states:
    //  - Completed.
    //  - An error occurred, but a retry may succeed.
    //  - A fatal error occurred.
    final ImmutableList.Builder<PubsubMessage> remaining = ImmutableList.builder();
    for (int i = 0; i < batchSize; ++i) {
        final ApiFuture<String> pendingResult = sendResults.get(i);
        try {
            final String messageId = pendingResult.get();
            if (logger.isDebugEnabled()) {
                final PubsubMessage message = batch.get(i);
                logger.debug("Finished sending event (partyId={}, eventId={}) to Pub/Sub: messageId = {}",
                             message.getAttributesOrDefault(MESSAGE_ATTRIBUTE_PARTYID, "N/A"),
                             message.getAttributesOrDefault(MESSAGE_ATTRIBUTE_EVENTID, "N/A"),
                             messageId);
            }
        } catch (final ExecutionException e) {
            final PubsubMessage message = batch.get(i);
            // The Pub/Sub publisher internally has a retry policy, but outside that we also
            // retry indefinitely unless it's a cause that we don't understand.
            final Throwable cause = e.getCause();
            if (cause instanceof ApiException) {
                final ApiException apiException = (ApiException) cause;
                if (apiException.isRetryable()) {
                    logger.debug("Transient error sending event (partyId={}, eventId={}) to Pub/Sub; retrying.",
                                 message.getAttributesOrDefault(MESSAGE_ATTRIBUTE_PARTYID, "N/A"),
                                 message.getAttributesOrDefault(MESSAGE_ATTRIBUTE_EVENTID, "N/A"),
                                 cause);
                    remaining.add(message);
                } else {
                    logger.warn("Permanent error sending event (partyId={}, eventId={}) to Pub/Sub; abandoning.",
                                message.getAttributesOrDefault(MESSAGE_ATTRIBUTE_PARTYID, "N/A"),
                                message.getAttributesOrDefault(MESSAGE_ATTRIBUTE_EVENTID, "N/A"),
                                cause);
                }
            } else {
                logger.error("Unknown error sending event (partyId={}, eventId={}) to Pub/Sub; abandoning.",
                             message.getAttributesOrDefault(MESSAGE_ATTRIBUTE_PARTYID, "N/A"),
                             message.getAttributesOrDefault(MESSAGE_ATTRIBUTE_EVENTID, "N/A"),
                             cause);
            }
        }
    }
    return remaining.build();
}
 
Example 39
Source File: CloudPubSubSourceTask.java    From pubsub with Apache License 2.0 4 votes
@Override
public List<SourceRecord> poll() throws InterruptedException {
  ackMessages();
  log.debug("Polling...");
  PullRequest request =
      PullRequest.newBuilder()
          .setSubscription(cpsSubscription)
          .setReturnImmediately(false)
          .setMaxMessages(cpsMaxBatchSize)
          .build();
  try {
    PullResponse response = subscriber.pull(request).get();
    List<SourceRecord> sourceRecords = new ArrayList<>();
    log.trace("Received " + response.getReceivedMessagesList().size() + " messages");
    for (ReceivedMessage rm : response.getReceivedMessagesList()) {
      PubsubMessage message = rm.getMessage();
      String ackId = rm.getAckId();
      // If we receive this message a second (or more) time because its ack failed, do not
      // create another SourceRecord for it. We also skip messages for which we are still
      // waiting on an ack response.
      if (ackIds.contains(ackId) || deliveredAckIds.contains(ackId) || ackIdsInFlight.contains(ackId)) {
        continue;
      }
      ackIds.add(ackId);
      Map<String, String> messageAttributes = message.getAttributesMap();
      String key = messageAttributes.get(kafkaMessageKeyAttribute);
      Long timestamp = getLongValue(messageAttributes.get(kafkaMessageTimestampAttribute));
      if (timestamp == null){
        timestamp = Timestamps.toMillis(message.getPublishTime());
      }
      ByteString messageData = message.getData();
      byte[] messageBytes = messageData.toByteArray();

      boolean hasCustomAttributes = !standardAttributes.containsAll(messageAttributes.keySet());

      Map<String,String> ack = Collections.singletonMap(cpsSubscription, ackId);
      SourceRecord record = null;
      if (hasCustomAttributes) {
        if (useKafkaHeaders) {
          record = createRecordWithHeaders(messageAttributes, ack, key, messageBytes, timestamp);
        } else {
          record = createRecordWithStruct(messageAttributes, ack, key, messageBytes, timestamp);
        }
      } else {
        record =
          new SourceRecord(
              null,
              ack,
              kafkaTopic,
              selectPartition(key, messageBytes),
              Schema.OPTIONAL_STRING_SCHEMA,
              key,
              Schema.BYTES_SCHEMA,
              messageBytes,
              timestamp);
      }
      sourceRecords.add(record);
    }
    return sourceRecords;
  } catch (Exception e) {
    log.info("Error while retrieving records, treating as an empty poll. " + e);
    return new ArrayList<>();
  }
}
 
Example 40
Source File: CloudPubSubSourceTask.java    From pubsub with Apache License 2.0 4 votes
@VisibleForTesting
public CloudPubSubSourceTask(CloudPubSubSubscriber subscriber) {
  this.subscriber = subscriber;
}
 
Example 41
Source File: CloudPubSubRoundRobinSubscriber.java    From pubsub with Apache License 2.0 4 votes
public CloudPubSubRoundRobinSubscriber(int subscriberCount, CredentialsProvider gcpCredentialsProvider) {
  subscribers = new ArrayList<>();
  for (int i = 0; i < subscriberCount; ++i) {
    subscribers.add(new CloudPubSubGRPCSubscriber(gcpCredentialsProvider));
  }
}
 
Example 42
Source File: GoogleCloudPubSubFlusher.java    From divolte-collector with Apache License 2.0 4 votes
GoogleCloudPubSubFlusher(final Publisher publisher, final DivolteSchema schema) {
    this.publisher = Objects.requireNonNull(publisher);
    this.schemaFingerprint = schemaFingerprint(schema);
    this.schemaConfluentId = schema.confluentId.map(i -> "0x" + Integer.toHexString(i));
}
 
Example 43
Source File: GCloudEmulatorManager.java    From flink with Apache License 2.0 4 votes
public static String getDockerPubSubPort() {
	if (pubsubPort == null) {
		throw new IllegalStateException("The docker has not yet been started (yet) so you cannot get the port information yet.");
	}
	return pubsubPort;
}
 
Example 44
Source File: AutoscalingPolicyQueueBasedScaling.java    From google-api-java-client-services with Apache License 2.0 4 votes
/**
 * Configuration for Cloud Pub/Sub subscription queue.
 * @param cloudPubSub cloudPubSub or {@code null} for none
 */
public AutoscalingPolicyQueueBasedScaling setCloudPubSub(AutoscalingPolicyQueueBasedScalingCloudPubSub cloudPubSub) {
  this.cloudPubSub = cloudPubSub;
  return this;
}
 
Example 45
Source File: GCloudEmulatorManager.java    From flink with Apache License 2.0 4 votes
public static String getDockerPubSubPort() {
	if (pubsubPort == null) {
		throw new IllegalStateException("The docker has not yet been started (yet) so you cannot get the port information yet.");
	}
	return pubsubPort;
}
 
Example 46
Source File: CloudPubSubSourceConnectorTest.java    From pubsub with Apache License 2.0 4 votes
@Test(expected = ConfigException.class)
public void testStartWhenRequiredConfigMissing() {
  connector.start(new HashMap<String, String>());
}
 
Example 47
Source File: CloudPubSubSinkTaskTest.java    From pubsub with Apache License 2.0 4 votes
public SpyableFuture(V value) {
  this.value = value;
}
 
Example 48
Source File: GoogleCloudPubSubFlusherTest.java    From divolte-collector with Apache License 2.0 4 votes
@Test
public void testMessagesHaveTimestampAttribute() {
    processSingleMessage();
    final PubsubMessage deliveredMessage = getFirstPublishedMessage();
    assertEquals("2018-09-14T13:32:10.034261025Z", deliveredMessage.getAttributesOrThrow("timestamp"));
}
 
Example 49
Source File: CloudPubSubSinkTask.java    From pubsub with Apache License 2.0 4 votes
@VisibleForTesting
public CloudPubSubSinkTask(Publisher publisher) {
  this.publisher = publisher;
}
 
Example 50
Source File: CloudPubSubSinkTaskTest.java    From pubsub with Apache License 2.0 4 votes
/**
 * Tests that when requested, Kafka metadata is included in the messages published to Cloud
 * Pub/Sub.
 */
@Test
public void testKafkaMetadata() {
  props.put(CloudPubSubSinkConnector.PUBLISH_KAFKA_METADATA, "true");
  props.put(CloudPubSubSinkConnector.MAX_BUFFER_SIZE_CONFIG, CPS_MIN_BATCH_SIZE1);
  task.start(props);
  List<SinkRecord> records = new ArrayList<SinkRecord>();
  records.add(
      new SinkRecord(
          KAFKA_TOPIC,
          4,
          STRING_SCHEMA,
          KAFKA_MESSAGE_KEY,
          BYTE_STRING_SCHEMA,
          KAFKA_MESSAGE1,
          1000,
          50000L,
          TimestampType.CREATE_TIME));
  records.add(
      new SinkRecord(
          KAFKA_TOPIC,
          4,
          STRING_SCHEMA,
          KAFKA_MESSAGE_KEY,
          BYTE_STRING_SCHEMA,
          KAFKA_MESSAGE2,
          1001,
          50001L,
          TimestampType.CREATE_TIME));
  task.put(records);
  ArgumentCaptor<PubsubMessage> captor = ArgumentCaptor.forClass(PubsubMessage.class);
  verify(publisher, times(2)).publish(captor.capture());
  List<PubsubMessage> requestArgs = captor.getAllValues();


  List<PubsubMessage> expectedMessages = new ArrayList<>();
  Map<String, String> attributes1 = new HashMap<>();
  attributes1.put(ConnectorUtils.CPS_MESSAGE_KEY_ATTRIBUTE, KAFKA_MESSAGE_KEY);
  attributes1.put(ConnectorUtils.KAFKA_TOPIC_ATTRIBUTE, KAFKA_TOPIC);
  attributes1.put(ConnectorUtils.KAFKA_PARTITION_ATTRIBUTE, "4");
  attributes1.put(ConnectorUtils.KAFKA_OFFSET_ATTRIBUTE, "1000");
  attributes1.put(ConnectorUtils.KAFKA_TIMESTAMP_ATTRIBUTE, "50000");
  expectedMessages.add(
      PubsubMessage.newBuilder().putAllAttributes(attributes1).setData(KAFKA_MESSAGE1).build());
  Map<String, String> attributes2 = new HashMap<>();
  attributes2.put(ConnectorUtils.CPS_MESSAGE_KEY_ATTRIBUTE, KAFKA_MESSAGE_KEY);
  attributes2.put(ConnectorUtils.KAFKA_TOPIC_ATTRIBUTE, KAFKA_TOPIC);
  attributes2.put(ConnectorUtils.KAFKA_PARTITION_ATTRIBUTE, "4");
  attributes2.put(ConnectorUtils.KAFKA_OFFSET_ATTRIBUTE, "1001");
  attributes2.put(ConnectorUtils.KAFKA_TIMESTAMP_ATTRIBUTE, "50001");
  expectedMessages.add(
      PubsubMessage.newBuilder().putAllAttributes(attributes2).setData(KAFKA_MESSAGE2).build());

  assertEquals(requestArgs, expectedMessages);
}
 
Example 51
Source File: CloudPubSubSourceTaskTest.java    From pubsub with Apache License 2.0 4 votes
/**
 * Tests that the correct partition is assigned when the partition scheme is "round_robin". The
 * test makes sure to submit an appropriate number of messages to poll() so that all partitions
 * in the round robin are hit once.
 */
@Test
public void testPollWithPartitionSchemeRoundRobin() throws Exception {
  task.start(props);
  ReceivedMessage rm1 = createReceivedMessage(ACK_ID1, CPS_MESSAGE, new HashMap<String, String>());
  ReceivedMessage rm2 = createReceivedMessage(ACK_ID2, CPS_MESSAGE, new HashMap<String, String>());
  ReceivedMessage rm3 = createReceivedMessage(ACK_ID3, CPS_MESSAGE, new HashMap<String, String>());
  ReceivedMessage rm4 = createReceivedMessage(ACK_ID4, CPS_MESSAGE, new HashMap<String, String>());
  PullResponse stubbedPullResponse =
      PullResponse.newBuilder()
          .addReceivedMessages(0, rm1)
          .addReceivedMessages(1, rm2)
          .addReceivedMessages(2, rm3)
          .addReceivedMessages(3, rm4)
          .build();
  when(subscriber.pull(any(PullRequest.class)).get()).thenReturn(stubbedPullResponse);
  List<SourceRecord> result = task.poll();
  verify(subscriber, never()).ackMessages(any(AcknowledgeRequest.class));
  assertEquals(4, result.size());
  SourceRecord expected1 =
      new SourceRecord(
          null,
          null,
          KAFKA_TOPIC,
          0,
          Schema.OPTIONAL_STRING_SCHEMA,
          null,
          Schema.BYTES_SCHEMA,
          KAFKA_VALUE);
  SourceRecord expected2 =
      new SourceRecord(
          null,
          null,
          KAFKA_TOPIC,
          1,
          Schema.OPTIONAL_STRING_SCHEMA,
          null,
          Schema.BYTES_SCHEMA,
          KAFKA_VALUE);
  SourceRecord expected3 =
      new SourceRecord(
          null,
          null,
          KAFKA_TOPIC,
          2,
          Schema.OPTIONAL_STRING_SCHEMA,
          null,
          Schema.BYTES_SCHEMA,
          KAFKA_VALUE);
  SourceRecord expected4 =
      new SourceRecord(
          null,
          null,
          KAFKA_TOPIC,
          0,
          Schema.OPTIONAL_STRING_SCHEMA,
          null,
          Schema.BYTES_SCHEMA,
          KAFKA_VALUE);
  assertRecordsEqual(expected1, result.get(0));
  assertRecordsEqual(expected2, result.get(1));
  assertRecordsEqual(expected3, result.get(2));
  assertRecordsEqual(expected4, result.get(3));
}
 
Example 52
Source File: CloudPubSubSourceTask.java    From pubsub with Apache License 2.0 4 votes
@Override
public void commit() throws InterruptedException {
  ackMessages();
}
 
Example 53
Source File: CloudPubSubSinkTaskTest.java    From pubsub with Apache License 2.0 4 votes
/**
 * Tests that when requested, Kafka headers are included in the messages published to Cloud
 * Pub/Sub, but that unsupported headers are discarded: any header whose key is longer than
 * 256 bytes or whose value is longer than 1024 bytes.
 */
@Test
public void testUnsupportedKafkaHeaders() {
  props.put(CloudPubSubSinkConnector.PUBLISH_KAFKA_HEADERS, "true");
  task.start(props);
  String veryLongHeaderName;
  String veryLongValue;
  StringBuilder stringBuilder = new StringBuilder();
  for (int i = 0; i < 257; i++) {
    stringBuilder.append("-");
  }
  veryLongHeaderName = stringBuilder.toString();
  stringBuilder.setLength(0);
  for (int i = 0; i < 1025; i++) {
    stringBuilder.append(".");
  }
  veryLongValue = stringBuilder.toString();
  stringBuilder.setLength(0);
  List<SinkRecord> records = new ArrayList<SinkRecord>();
  SinkRecord record = new SinkRecord(
      KAFKA_TOPIC,
      4,
      STRING_SCHEMA,
      KAFKA_MESSAGE_KEY,
      BYTE_STRING_SCHEMA,
      KAFKA_MESSAGE1,
      1000,
      50000L,
      TimestampType.CREATE_TIME);
  record.headers().addString("myHeader", "myValue");
  record.headers().addString(veryLongHeaderName, "anotherValue");
  record.headers().addString("anotherHeader", veryLongValue);
  record.headers().addString(veryLongHeaderName, veryLongValue);
  records.add(record);
  record = new SinkRecord(
      KAFKA_TOPIC,
      4,
      STRING_SCHEMA,
      KAFKA_MESSAGE_KEY,
      BYTE_STRING_SCHEMA,
      KAFKA_MESSAGE2,
      1001,
      50001L,
      TimestampType.CREATE_TIME);
  record.headers().addString("yourHeader", "yourValue");
  records.add(record);
  task.put(records);
  ArgumentCaptor<PubsubMessage> captor = ArgumentCaptor.forClass(PubsubMessage.class);
  verify(publisher, times(2)).publish(captor.capture());
  List<PubsubMessage> requestArgs = captor.getAllValues();


  List<PubsubMessage> expectedMessages = new ArrayList<>();
  Map<String, String> attributes1 = new HashMap<>();
  attributes1.put(ConnectorUtils.CPS_MESSAGE_KEY_ATTRIBUTE, KAFKA_MESSAGE_KEY);
  attributes1.put("myHeader", "myValue");
  expectedMessages.add(
      PubsubMessage.newBuilder().putAllAttributes(attributes1).setData(KAFKA_MESSAGE1).build());
  Map<String, String> attributes2 = new HashMap<>();
  attributes2.put(ConnectorUtils.CPS_MESSAGE_KEY_ATTRIBUTE, KAFKA_MESSAGE_KEY);
  attributes2.put("yourHeader", "yourValue");
  expectedMessages.add(
      PubsubMessage.newBuilder().putAllAttributes(attributes2).setData(KAFKA_MESSAGE2).build());

  assertEquals(257, veryLongHeaderName.getBytes().length);
  assertEquals(1025, veryLongValue.getBytes().length);
  assertEquals(expectedMessages, requestArgs);
}
 
Example 54
Source File: CloudPubSubSourceConnector.java    From pubsub with Apache License 2.0 4 votes
PartitionScheme(String value) {
  this.value = value;
}
 
Example 55
Source File: CloudPubSubSinkTaskTest.java    From pubsub with Apache License 2.0 4 votes
@Override
public boolean cancel(boolean mayInterruptIfRunning) {
  return false;
}
 
Example 56
Source File: AutoscalingPolicyQueueBasedScaling.java    From google-api-java-client-services with Apache License 2.0 2 votes
/**
 * Configuration for Cloud Pub/Sub subscription queue.
 * @return value or {@code null} for none
 */
public AutoscalingPolicyQueueBasedScalingCloudPubSub getCloudPubSub() {
  return cloudPubSub;
}