Java Code Examples for org.apache.beam.sdk.util.CoderUtils#clone()
The following examples show how to use org.apache.beam.sdk.util.CoderUtils#clone().
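CoderUtils.clone(coder, value) deep-copies a value by round-tripping it through the given coder's encode/decode path, throwing a CoderException if serialization fails. Before the examples, here is a minimal sketch of that round trip; the class name is illustrative and the coders chosen are just one possible combination:

import org.apache.beam.sdk.coders.CoderException;
import org.apache.beam.sdk.coders.KvCoder;
import org.apache.beam.sdk.coders.StringUtf8Coder;
import org.apache.beam.sdk.coders.VarIntCoder;
import org.apache.beam.sdk.util.CoderUtils;
import org.apache.beam.sdk.values.KV;

public class CoderUtilsCloneSketch {
  public static void main(String[] args) throws CoderException {
    KV<String, Integer> original = KV.of("answer", 42);
    // clone() encodes the value with the coder and decodes it again,
    // yielding an independent copy equal to the original.
    KV<String, Integer> copy =
        CoderUtils.clone(KvCoder.of(StringUtf8Coder.of(), VarIntCoder.of()), original);
    System.out.println(copy.equals(original)); // true
  }
}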
Example 1
Source File: PAssertTest.java From beam with Apache License 2.0
@Test
public void testFailureWithExceptionEncodedDecoded() throws IOException {
  Throwable error;
  try {
    throwWrappedError();
    throw new IllegalStateException("Should have failed");
  } catch (Throwable e) {
    error = e;
  }
  SuccessOrFailure failure =
      SuccessOrFailure.failure(PAssert.PAssertionSite.capture("here"), error);
  SuccessOrFailure res = CoderUtils.clone(SerializableCoder.of(SuccessOrFailure.class), failure);
  assertEquals(
      "Encode-decode failed SuccessOrFailure",
      Throwables.getStackTraceAsString(failure.assertionError()),
      Throwables.getStackTraceAsString(res.assertionError()));
}
Example 2
Source File: PublishResultCodersTest.java From beam with Apache License 2.0
@Test
public void testFullPublishResultWithoutHeadersDecodeEncodeEquals() throws Exception {
  CoderProperties.coderDecodeEncodeEqual(
      PublishResultCoders.fullPublishResultWithoutHeaders(),
      new PublishResult().withMessageId(UUID.randomUUID().toString()));
  PublishResult value = buildFullPublishResult();
  PublishResult clone =
      CoderUtils.clone(PublishResultCoders.fullPublishResultWithoutHeaders(), value);
  assertThat(
      clone.getSdkResponseMetadata().getRequestId(),
      equalTo(value.getSdkResponseMetadata().getRequestId()));
  assertThat(
      clone.getSdkHttpMetadata().getHttpStatusCode(),
      equalTo(value.getSdkHttpMetadata().getHttpStatusCode()));
  assertThat(clone.getSdkHttpMetadata().getHttpHeaders().isEmpty(), equalTo(true));
}
Example 3
Source File: PublishResultCodersTest.java From beam with Apache License 2.0
@Test
public void testFullPublishResultIncludingHeadersDecodeEncodeEquals() throws Exception {
  CoderProperties.coderDecodeEncodeEqual(
      PublishResultCoders.fullPublishResult(),
      new PublishResult().withMessageId(UUID.randomUUID().toString()));
  PublishResult value = buildFullPublishResult();
  PublishResult clone = CoderUtils.clone(PublishResultCoders.fullPublishResult(), value);
  assertThat(
      clone.getSdkResponseMetadata().getRequestId(),
      equalTo(value.getSdkResponseMetadata().getRequestId()));
  assertThat(
      clone.getSdkHttpMetadata().getHttpStatusCode(),
      equalTo(value.getSdkHttpMetadata().getHttpStatusCode()));
  assertThat(
      clone.getSdkHttpMetadata().getHttpHeaders(),
      equalTo(value.getSdkHttpMetadata().getHttpHeaders()));
}
Example 4
Source File: TDigestQuantilesTest.java From beam with Apache License 2.0
private <T> boolean encodeDecodeEquals(MergingDigest tDigest) throws IOException {
  MergingDigest decoded = CoderUtils.clone(new MergingDigestCoder(), tDigest);
  boolean equal = true;
  // The only way to compare the two sketches is to compare them centroid by centroid.
  // Indeed, the means are doubles but are encoded as float and cast during decoding.
  // This entails a small approximation that makes the centroids different after decoding.
  Iterator<Centroid> it1 = decoded.centroids().iterator();
  Iterator<Centroid> it2 = tDigest.centroids().iterator();
  for (int i = 0; i < decoded.centroids().size(); i++) {
    Centroid c1 = it1.next();
    Centroid c2 = it2.next();
    if ((float) c1.mean() != (float) c2.mean() || c1.count() != c2.count()) {
      equal = false;
      break;
    }
  }
  return equal;
}
Example 5
Source File: AwsCodersTest.java From beam with Apache License 2.0
@Test
public void testSdkHttpMetadataWithoutHeadersDecodeEncodeEquals() throws Exception {
  SdkHttpMetadata value = buildSdkHttpMetadata();
  SdkHttpMetadata clone = CoderUtils.clone(AwsCoders.sdkHttpMetadataWithoutHeaders(), value);
  assertThat(clone.getHttpStatusCode(), equalTo(value.getHttpStatusCode()));
  assertThat(clone.getHttpHeaders().isEmpty(), equalTo(true));
}
Example 6
Source File: AvroHdfsFileSource.java From components with Apache License 2.0
@Override
protected KV<AvroKey, NullWritable> nextPair() throws IOException, InterruptedException {
  // Not only is the AvroKey reused by the file format, but the underlying GenericRecord is as well.
  KV<AvroKey, NullWritable> kv = super.nextPair();
  GenericRecord gr = (GenericRecord) kv.getKey().datum();
  gr = CoderUtils.clone(AvroCoder.of(gr.getSchema()), gr);
  return KV.of(new AvroKey(gr), kv.getValue());
}
Example 7
Source File: UnboundedReadEvaluatorFactory.java From beam with Apache License 2.0
private UnboundedReader<OutputT> getReader(UnboundedSourceShard<OutputT, CheckpointMarkT> shard)
    throws IOException {
  UnboundedReader<OutputT> existing = shard.getExistingReader();
  if (existing == null) {
    CheckpointMarkT checkpoint = shard.getCheckpoint();
    if (checkpoint != null) {
      checkpoint = CoderUtils.clone(shard.getSource().getCheckpointMarkCoder(), checkpoint);
    }
    return shard.getSource().createReader(options, checkpoint);
  } else {
    return existing;
  }
}
Example 8
Source File: CoderTypeSerializer.java From beam with Apache License 2.0
@Override
public T copy(T t) {
  try {
    return CoderUtils.clone(coder, t);
  } catch (CoderException e) {
    throw new RuntimeException("Could not clone.", e);
  }
}
Example 9
Source File: InMemoryStateInternals.java From beam with Apache License 2.0
/** Like {@link CoderUtils#clone} but without a checked exception. */
private static <T> T uncheckedClone(Coder<T> coder, T value) {
  try {
    return CoderUtils.clone(coder, value);
  } catch (CoderException e) {
    throw new RuntimeException(e);
  }
}
Example 10
Source File: KafkaIOTest.java From beam with Apache License 2.0
@Test
public void testUnboundedSourceCheckpointMark() throws Exception {
  int numElements = 85; // 85 to make sure some partitions have more records than others.

  // Create a single split:
  UnboundedSource<KafkaRecord<Integer, Long>, KafkaCheckpointMark> source =
      mkKafkaReadTransform(numElements, new ValueAsTimestampFn())
          .makeSource()
          .split(1, PipelineOptionsFactory.create())
          .get(0);

  UnboundedReader<KafkaRecord<Integer, Long>> reader = source.createReader(null, null);
  final int numToSkip = 20; // One from each partition.

  // Advance numToSkip elements.
  for (int i = 0; i < numToSkip; ++i) {
    advanceOnce(reader, i > 0);
  }

  // Confirm that we get the expected element in sequence before checkpointing.
  assertEquals(numToSkip - 1, (long) reader.getCurrent().getKV().getValue());
  assertEquals(numToSkip - 1, reader.getCurrentTimestamp().getMillis());

  // Checkpoint and restart, and confirm that the source continues correctly.
  KafkaCheckpointMark mark =
      CoderUtils.clone(
          source.getCheckpointMarkCoder(), (KafkaCheckpointMark) reader.getCheckpointMark());
  reader = source.createReader(null, mark);

  // Confirm that we get the next elements in sequence.
  // This also confirms that the reader interleaves records from each partition.
  for (int i = numToSkip; i < numElements; i++) {
    advanceOnce(reader, i > numToSkip);
    assertEquals(i, (long) reader.getCurrent().getKV().getValue());
    assertEquals(i, reader.getCurrentTimestamp().getMillis());
  }
}
Example 11
Source File: HadoopFormatIO.java From beam with Apache License 2.0
/**
 * Beam expects immutable objects, but the Hadoop InputFormats tend to re-use the same object
 * when returning them. Hence, mutable objects returned by Hadoop InputFormats are cloned.
 */
private <T> T cloneIfPossiblyMutable(T input, Coder<T> coder)
    throws CoderException, ClassCastException {
  // If the input object is not of a known immutable type, clone the object.
  if (!isKnownImmutable(input)) {
    input = CoderUtils.clone(coder, input);
  }
  return input;
}
Example 12
Source File: AmqpMessageCoderTest.java From beam with Apache License 2.0
@Test
public void encodeDecodeLargeMessage() throws Exception {
  Message message = Message.Factory.create();
  message.setAddress("address");
  message.setSubject("subject");
  String body = Joiner.on("").join(Collections.nCopies(32 * 1024 * 1024, " "));
  message.setBody(new AmqpValue(body));

  AmqpMessageCoder coder = AmqpMessageCoder.of();

  Message clone = CoderUtils.clone(coder, message);

  // Assert the comparison instead of discarding its result.
  assertEquals(message.getBody().toString(), clone.getBody().toString());
}
Example 13
Source File: AmqpMessageCoderTest.java From beam with Apache License 2.0
@Test
public void encodeDecode() throws Exception {
  Message message = Message.Factory.create();
  message.setBody(new AmqpValue("body"));
  message.setAddress("address");
  message.setSubject("test");

  AmqpMessageCoder coder = AmqpMessageCoder.of();

  Message clone = CoderUtils.clone(coder, message);

  assertEquals("AmqpValue{body}", clone.getBody().toString());
  assertEquals("address", clone.getAddress());
  assertEquals("test", clone.getSubject());
}
Example 14
Source File: AwsCodersTest.java From beam with Apache License 2.0
@Test
public void testSdkHttpMetadataDecodeEncodeEquals() throws Exception {
  SdkHttpMetadata value = buildSdkHttpMetadata();
  SdkHttpMetadata clone = CoderUtils.clone(AwsCoders.sdkHttpMetadata(), value);
  assertThat(clone.getHttpStatusCode(), equalTo(value.getHttpStatusCode()));
  assertThat(clone.getHttpHeaders(), equalTo(value.getHttpHeaders()));
}
Example 15
Source File: PubsubUnboundedSourceTest.java From beam with Apache License 2.0
@Test
public void noSubscriptionNoSplitGeneratesSubscription() throws Exception {
  TopicPath topicPath = PubsubClient.topicPathFromName("my_project", "my_topic");
  factory = PubsubTestClient.createFactoryForCreateSubscription();
  PubsubUnboundedSource source =
      new PubsubUnboundedSource(
          factory,
          StaticValueProvider.of(PubsubClient.projectPathFromId("my_project")),
          StaticValueProvider.of(topicPath),
          null /* subscription */,
          null /* timestampLabel */,
          null /* idLabel */,
          false /* needsAttributes */);
  assertThat(source.getSubscription(), nullValue());

  assertThat(source.getSubscription(), nullValue());

  PipelineOptions options = PipelineOptionsFactory.create();
  PubsubSource actualSource = new PubsubSource(source);
  PubsubReader reader = actualSource.createReader(options, null);
  SubscriptionPath createdSubscription = reader.subscription;
  assertThat(createdSubscription, not(nullValue()));

  PubsubCheckpoint checkpoint = reader.getCheckpointMark();
  assertThat(checkpoint.subscriptionPath, equalTo(createdSubscription.getPath()));

  checkpoint.finalizeCheckpoint();
  PubsubCheckpoint deserCheckpoint =
      CoderUtils.clone(actualSource.getCheckpointMarkCoder(), checkpoint);
  assertThat(checkpoint.subscriptionPath, not(nullValue()));
  assertThat(checkpoint.subscriptionPath, equalTo(deserCheckpoint.subscriptionPath));

  PubsubReader readerFromOriginal = actualSource.createReader(options, checkpoint);
  PubsubReader readerFromDeser = actualSource.createReader(options, deserCheckpoint);

  assertThat(readerFromOriginal.subscription, equalTo(createdSubscription));
  assertThat(readerFromDeser.subscription, equalTo(createdSubscription));
}
Example 16
Source File: CountingSourceTest.java From beam with Apache License 2.0
@Test
public void testUnboundedSourceCheckpointMark() throws Exception {
  UnboundedSource<Long, CounterMark> source =
      CountingSource.unboundedWithTimestampFn(new ValueAsTimestampFn());
  UnboundedReader<Long> reader = source.createReader(null, null);
  final long numToSkip = 3;
  assertTrue(reader.start());

  // Advance the source numToSkip elements and manually save state.
  for (long l = 0; l < numToSkip; ++l) {
    reader.advance();
  }

  // Confirm that we get the expected element in sequence before checkpointing.
  assertEquals(numToSkip, (long) reader.getCurrent());
  assertEquals(numToSkip, reader.getCurrentTimestamp().getMillis());

  // Checkpoint and restart, and confirm that the source continues correctly.
  CounterMark mark =
      CoderUtils.clone(source.getCheckpointMarkCoder(), (CounterMark) reader.getCheckpointMark());
  reader = source.createReader(null, mark);
  assertTrue(reader.start());

  // Confirm that we get the next element in sequence.
  assertEquals(numToSkip + 1, (long) reader.getCurrent());
  assertEquals(numToSkip + 1, reader.getCurrentTimestamp().getMillis());
}
Example 17
Source File: AvroCoderTest.java From beam with Apache License 2.0
/**
 * Tests that {@link AvroCoder} works around issues in Avro where cache classes might be from the
 * wrong ClassLoader, causing confusing "Cannot cast X to X" error messages.
 */
@Test
public void testTwoClassLoaders() throws Exception {
  ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
  ClassLoader loader1 =
      new InterceptingUrlClassLoader(contextClassLoader, AvroCoderTestPojo.class.getName());
  ClassLoader loader2 =
      new InterceptingUrlClassLoader(contextClassLoader, AvroCoderTestPojo.class.getName());

  Class<?> pojoClass1 = loader1.loadClass(AvroCoderTestPojo.class.getName());
  Class<?> pojoClass2 = loader2.loadClass(AvroCoderTestPojo.class.getName());

  Object pojo1 = InstanceBuilder.ofType(pojoClass1).withArg(String.class, "hello").build();
  Object pojo2 = InstanceBuilder.ofType(pojoClass2).withArg(String.class, "goodbye").build();

  // Confirm incompatibility.
  try {
    pojoClass2.cast(pojo1);
    fail("Expected ClassCastException; without it, this test is vacuous");
  } catch (ClassCastException e) {
    // Expected.
  }

  // The first coder is expected to populate the Avro SpecificData cache.
  // The second coder is expected to be corrupted if the caching is done wrong.
  AvroCoder<Object> avroCoder1 = (AvroCoder) AvroCoder.of(pojoClass1);
  AvroCoder<Object> avroCoder2 = (AvroCoder) AvroCoder.of(pojoClass2);

  Object cloned1 = CoderUtils.clone(avroCoder1, pojo1);
  Object cloned2 = CoderUtils.clone(avroCoder2, pojo2);

  // Confirming that the uncorrupted coder is fine.
  pojoClass1.cast(cloned1);

  // Confirmed to fail prior to the fix.
  pojoClass2.cast(cloned2);
}
Example 18
Source File: KafkaIOTest.java From beam with Apache License 2.0
@Test
public void testUnboundedSourceCheckpointMarkWithEmptyPartitions() throws Exception {
  // Similar to testUnboundedSourceCheckpointMark(), but verifies that the source resumes
  // properly from empty partitions, without missing messages added since the checkpoint.

  // Initialize the consumer with fewer elements than the number of partitions so that some
  // partitions are empty.
  int initialNumElements = 5;
  UnboundedSource<KafkaRecord<Integer, Long>, KafkaCheckpointMark> source =
      mkKafkaReadTransform(initialNumElements, new ValueAsTimestampFn())
          .makeSource()
          .split(1, PipelineOptionsFactory.create())
          .get(0);

  UnboundedReader<KafkaRecord<Integer, Long>> reader = source.createReader(null, null);

  for (int l = 0; l < initialNumElements; ++l) {
    advanceOnce(reader, l > 0);
  }

  // Checkpoint and restart, and confirm that the source continues correctly.
  KafkaCheckpointMark mark =
      CoderUtils.clone(
          source.getCheckpointMarkCoder(), (KafkaCheckpointMark) reader.getCheckpointMark());

  // Create another source with a MockConsumer using OffsetResetStrategy.LATEST. This ensures that
  // the reader needs to explicitly seek to the first offset for partitions that were empty.

  int numElements = 100; // All 20 partitions will have elements.
  List<String> topics = ImmutableList.of("topic_a", "topic_b");

  source =
      KafkaIO.<Integer, Long>read()
          .withBootstrapServers("none")
          .withTopics(topics)
          .withConsumerFactoryFn(
              new ConsumerFactoryFn(topics, 10, numElements, OffsetResetStrategy.LATEST))
          .withKeyDeserializer(IntegerDeserializer.class)
          .withValueDeserializer(LongDeserializer.class)
          .withMaxNumRecords(numElements)
          .withTimestampFn(new ValueAsTimestampFn())
          .makeSource()
          .split(1, PipelineOptionsFactory.create())
          .get(0);

  reader = source.createReader(null, mark);

  // Verify in any order. As the partitions are unevenly read, the returned records are not in a
  // simple order. Note that testUnboundedSourceCheckpointMark() verifies round-robin order.
  List<Long> expected = new ArrayList<>();
  List<Long> actual = new ArrayList<>();
  for (long i = initialNumElements; i < numElements; i++) {
    advanceOnce(reader, i > initialNumElements);
    expected.add(i);
    actual.add(reader.getCurrent().getKV().getValue());
  }
  assertThat(actual, IsIterableContainingInAnyOrder.containsInAnyOrder(expected.toArray()));
}
Example 19
Source File: AwsCodersTest.java From beam with Apache License 2.0
@Test
public void testResponseMetadataDecodeEncodeEquals() throws Exception {
  ResponseMetadata value = buildResponseMetadata();
  ResponseMetadata clone = CoderUtils.clone(AwsCoders.responseMetadata(), value);
  assertThat(clone.getRequestId(), equalTo(value.getRequestId()));
}