Java Code Examples for io.pravega.client.EventStreamClientFactory#createEventWriter()
The following examples show how to use io.pravega.client.EventStreamClientFactory#createEventWriter().
Each example is taken from an open-source project; the source file, project, and license are noted above the code.
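Before the individual examples, here is a minimal, self-contained sketch of the typical call sequence. The controller URI tcp://localhost:9090 and the scope/stream names myScope and myStream are illustrative assumptions, not taken from the examples below; the sketch also assumes the scope and stream already exist.

    import io.pravega.client.ClientConfig;
    import io.pravega.client.EventStreamClientFactory;
    import io.pravega.client.stream.EventStreamWriter;
    import io.pravega.client.stream.EventWriterConfig;
    import io.pravega.client.stream.impl.JavaSerializer;

    import java.net.URI;

    public class CreateEventWriterSketch {
        public static void main(String[] args) {
            // Assumed: a running Pravega controller at this URI, and an existing
            // scope "myScope" containing a stream "myStream".
            ClientConfig clientConfig = ClientConfig.builder()
                    .controllerURI(URI.create("tcp://localhost:9090"))
                    .build();
            // Both the factory and the writer are AutoCloseable.
            try (EventStreamClientFactory clientFactory =
                         EventStreamClientFactory.withScope("myScope", clientConfig);
                 EventStreamWriter<String> writer = clientFactory.createEventWriter(
                         "myStream", new JavaSerializer<>(), EventWriterConfig.builder().build())) {
                // writeEvent is asynchronous and returns a CompletableFuture<Void>;
                // join() blocks until the event has been durably written.
                writer.writeEvent("routingKey", "hello pravega").join();
            }
        }
    }

All of the examples below follow this pattern: obtain a factory bound to a scope, then call createEventWriter(streamName, serializer, writerConfig) to get an EventStreamWriter.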
Example 1
Source File: SetupUtils.java From flink-connectors with Apache License 2.0
/**
 * Create a stream writer for writing Integer events.
 *
 * @param streamName Name of the test stream.
 *
 * @return Stream writer instance.
 */
public EventStreamWriter<Integer> getIntegerWriter(final String streamName) {
    Preconditions.checkState(this.started.get(), "Services not yet started");
    Preconditions.checkNotNull(streamName);

    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(this.scope, getClientConfig());
    return clientFactory.createEventWriter(
            streamName,
            new IntegerSerializer(),
            EventWriterConfig.builder().build());
}
Example 2
Source File: DebugStreamSegmentsTest.java From pravega with Apache License 2.0
private void randomScaleUpScaleDown(final EventStreamClientFactory clientFactory, final Controller controller) {
    @Cleanup
    EventStreamWriter<AutoScaleEvent> requestStreamWriter = clientFactory.createEventWriter("_requeststream",
            autoScaleEventSerializer, EventWriterConfig.builder().build());

    final Collection<Segment> currentSegments = controller.getCurrentSegments(SCOPE, STREAM).join().getSegments();
    Assert.assertTrue("Current Number of segments cannot be zero", currentSegments.size() > 0);

    // fetch a randomSegment
    final Segment randomSegment = currentSegments.toArray(new Segment[0])[random.nextInt(currentSegments.size())];

    AutoScaleEvent scaleEvent = null;
    if (random.nextBoolean()) {
        // trigger random scale up
        scaleEvent = new AutoScaleEvent(randomSegment.getScope(), randomSegment.getStreamName(),
                randomSegment.getSegmentId(), AutoScaleEvent.UP, System.currentTimeMillis(), 2, false, random.nextInt());
    } else {
        // trigger random scale down.
        scaleEvent = new AutoScaleEvent(randomSegment.getScope(), randomSegment.getStreamName(),
                randomSegment.getSegmentId(), AutoScaleEvent.DOWN, System.currentTimeMillis(), 2, false, random.nextInt()); // silent=false
    }
    Futures.getAndHandleExceptions(requestStreamWriter.writeEvent(scaleEvent),
            t -> new RuntimeException("Error while writing scale event", t));
}
Example 3
Source File: BatchClientTest.java From pravega with Apache License 2.0
protected void createTestStreamWithEvents(EventStreamClientFactory clientFactory) throws InterruptedException, ExecutionException {

    createStream();
    @Cleanup
    EventStreamWriter<String> writer = clientFactory.createEventWriter(STREAM, serializer,
            EventWriterConfig.builder().build());

    // write events to stream with 1 segment.
    write30ByteEvents(3, writer);

    // scale up and write events.
    Map<Double, Double> map = new HashMap<>();
    map.put(0.0, 0.33);
    map.put(0.33, 0.66);
    map.put(0.66, 1.0);
    Boolean result;
    assertTrue("Scale up operation", controllerWrapper.getController().scaleStream(Stream.of(SCOPE, STREAM),
            Collections.singletonList(0L), map, executor).getFuture().join());
    write30ByteEvents(3, writer);

    // scale down and write events.
    map = new HashMap<>();
    map.put(0.0, 0.5);
    map.put(0.5, 1.0);
    result = controllerWrapper.getController().scaleStream(Stream.of(SCOPE, STREAM),
            Arrays.asList(computeSegmentId(1, 1), computeSegmentId(2, 1), computeSegmentId(3, 1)), map, executor)
            .getFuture().get();
    assertTrue("Scale down operation result", result);
    write30ByteEvents(3, writer);
}
Example 4
Source File: BoundedStreamReaderTest.java From pravega with Apache License 2.0
@Test(timeout = 60000)
public void testReaderGroupWithSameBounds() throws Exception {
    createScope(SCOPE);
    createStream(STREAM1);
    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE,
            ClientConfig.builder().controllerURI(controllerUri).build());
    @Cleanup
    EventStreamWriter<String> writer1 = clientFactory.createEventWriter(STREAM1, serializer,
            EventWriterConfig.builder().build());

    // 1. Prep the stream with data.
    // Write events with event size of 30
    writer1.writeEvent(keyGenerator.get(), getEventData.apply(1)).get();
    writer1.writeEvent(keyGenerator.get(), getEventData.apply(2)).get();

    // 2. Create a StreamCut pointing to offset 30L
    StreamCut streamCut = getStreamCut(STREAM1, 30L, 0);

    // 3. Create a ReaderGroup where the lower and upper bound are the same.
    @Cleanup
    ReaderGroupManager groupManager = ReaderGroupManager.withScope(SCOPE, controllerUri);
    groupManager.createReaderGroup("group", ReaderGroupConfig
            .builder().disableAutomaticCheckpoints()
            .stream(Stream.of(SCOPE, STREAM1), streamCut, streamCut)
            .build());

    // 4. Create a reader
    @Cleanup
    EventStreamReader<String> reader = clientFactory.createReader("readerId", "group", serializer,
            ReaderConfig.builder().build());

    // 5. Verify if configuration is enforced.
    Assert.assertNull("Null is expected", reader.readNextEvent(1000).getEvent());
}
Example 5
Source File: EndToEndReaderGroupTest.java From pravega with Apache License 2.0
private void writeTestEvent(String scope, String streamName, int eventId) {
    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope,
            ClientConfig.builder().controllerURI(controllerURI).build());
    @Cleanup
    EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName, new JavaSerializer<>(),
            EventWriterConfig.builder().build());

    writer.writeEvent("0", Integer.toString(eventId)).join();
}
Example 6
Source File: AbstractReadWriteTest.java From pravega with Apache License 2.0
void writeEvents(EventStreamClientFactory clientFactory, String streamName, int totalEvents, int initialPoint) {
    @Cleanup
    EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName, new JavaSerializer<>(),
            EventWriterConfig.builder().build());
    for (int i = initialPoint; i < totalEvents + initialPoint; i++) {
        writer.writeEvent(String.format("%03d", i)).join(); // this ensures the event size is constant.
        log.debug("Writing event: {} to stream {}.", streamName + String.valueOf(i), streamName);
    }
}
Example 7
Source File: ReaderGroupTest.java From pravega with Apache License 2.0
public void writeEvents(int eventsToWrite, EventStreamClientFactory clientFactory) {
    @Cleanup
    EventStreamWriter<String> writer = clientFactory.createEventWriter(STREAM_NAME, new JavaSerializer<>(),
            EventWriterConfig.builder().build());
    for (int i = 0; i < eventsToWrite; i++) {
        writer.writeEvent(Integer.toString(i), " Event " + i);
    }
    writer.flush();
}
Example 8
Source File: ReadWriteUtils.java From pravega with Apache License 2.0
public static void writeEvents(EventStreamClientFactory clientFactory, String streamName, int totalEvents, int offset) {
    @Cleanup
    EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName, new UTF8StringSerializer(),
            EventWriterConfig.builder().build());
    for (int i = offset; i < totalEvents; i++) {
        writer.writeEvent(String.valueOf(i)).join();
        log.info("Writing event: {} to stream {}", i, streamName);
    }
}
Example 9
Source File: EndToEndStatsTest.java From pravega with Apache License 2.0
@Test(timeout = 10000)
@SuppressWarnings("deprecation")
public void testStatsCount() throws Exception {
    StreamConfiguration config = StreamConfiguration.builder()
                                                    .scalingPolicy(ScalingPolicy.fixed(1))
                                                    .build();
    Controller controller = controllerWrapper.getController();
    controllerWrapper.getControllerService().createScope("test").get();
    controller.createStream("test", "test", config).get();
    @Cleanup
    EventStreamClientFactory clientFactory = new ClientFactoryImpl("test", controller);

    EventWriterConfig writerConfig = EventWriterConfig.builder().transactionTimeoutTime(10000).build();
    @Cleanup
    EventStreamWriter<String> eventWriter = clientFactory.createEventWriter("test", new JavaSerializer<>(), writerConfig);
    @Cleanup
    TransactionalEventStreamWriter<String> txnWriter = clientFactory.createTransactionalEventWriter("test", new JavaSerializer<>(), writerConfig);

    String[] tags = segmentTags(NameUtils.getQualifiedStreamSegmentName("test", "test", 0L));

    for (int i = 0; i < 10; i++) {
        eventWriter.writeEvent("test").get();
    }
    assertEventuallyEquals(10, () -> (int) (statsRecorder.getRegistry().counter(SEGMENT_WRITE_EVENTS, tags).count()), 2000);
    assertEventuallyEquals(190, () -> (int) (statsRecorder.getRegistry().counter(SEGMENT_WRITE_BYTES, tags).count()), 100);

    Transaction<String> transaction = txnWriter.beginTxn();
    for (int i = 0; i < 10; i++) {
        transaction.writeEvent("0", "txntest1");
    }
    assertEventuallyEquals(10, () -> (int) (statsRecorder.getRegistry().counter(SEGMENT_WRITE_EVENTS, tags).count()), 2000);
    assertEventuallyEquals(190, () -> (int) (statsRecorder.getRegistry().counter(SEGMENT_WRITE_BYTES, tags).count()), 100);

    transaction.commit();

    assertEventuallyEquals(20, () -> (int) (statsRecorder.getRegistry().counter(SEGMENT_WRITE_EVENTS, tags).count()), 10000);
    assertEventuallyEquals(420, () -> (int) (statsRecorder.getRegistry().counter(SEGMENT_WRITE_BYTES, tags).count()), 100);
}
Example 10
Source File: DelegationTokenTest.java From pravega with Apache License 2.0
private void writeAnEvent(int tokenTtlInSeconds) throws ExecutionException, InterruptedException {
    ClusterWrapper pravegaCluster = new ClusterWrapper(true, tokenTtlInSeconds);
    try {
        pravegaCluster.initialize();

        String scope = "testscope";
        String streamName = "teststream";
        int numSegments = 1;
        String message = "test message";

        ClientConfig clientConfig = ClientConfig.builder()
                .controllerURI(URI.create(pravegaCluster.controllerUri()))
                .credentials(new DefaultCredentials("1111_aaaa", "admin"))
                .build();
        log.debug("Done creating client config.");

        createScopeStream(scope, streamName, numSegments, clientConfig);

        @Cleanup
        EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope, clientConfig);
        //@Cleanup
        EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName,
                new JavaSerializer<String>(),
                EventWriterConfig.builder().build());

        // Note: A TokenException is thrown here if token verification fails on the server.
        writer.writeEvent(message).get();

        log.debug("Done writing message '{}' to stream '{} / {}'", message, scope, streamName);
    } finally {
        pravegaCluster.close();
    }
}
Example 11
Source File: FlinkPravegaWriter.java From flink-connectors with Apache License 2.0
AbstractInternalWriter(EventStreamClientFactory clientFactory, boolean txnWriter) {
    Serializer<T> eventSerializer = new FlinkSerializer<>(serializationSchema);
    EventWriterConfig writerConfig = EventWriterConfig.builder()
            .transactionTimeoutTime(txnLeaseRenewalPeriod)
            .build();
    watermark = Long.MIN_VALUE;
    if (txnWriter) {
        pravegaTxnWriter = clientFactory.createTransactionalEventWriter(writerId(), stream.getStreamName(), eventSerializer, writerConfig);
    } else {
        pravegaWriter = clientFactory.createEventWriter(writerId(), stream.getStreamName(), eventSerializer, writerConfig);
    }
}
Example 12
Source File: WatermarkingTest.java From pravega with Apache License 2.0
@Test(timeout = 60000)
public void progressingWatermarkWithWriterTimeouts() throws Exception {
    String scope = "Timeout";
    String streamName = "Timeout";
    int numSegments = 1;

    URI controllerUri = URI.create("tcp://localhost:" + controllerPort);

    ClientConfig clientConfig = ClientConfig.builder().controllerURI(controllerUri).build();

    @Cleanup
    StreamManager streamManager = StreamManager.create(controllerUri);
    assertNotNull(streamManager);

    streamManager.createScope(scope);

    streamManager.createStream(scope, streamName, StreamConfiguration.builder()
                                                                     .scalingPolicy(ScalingPolicy.fixed(numSegments))
                                                                     .build());
    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope, clientConfig);
    @Cleanup
    SynchronizerClientFactory syncClientFactory = SynchronizerClientFactory.withScope(scope, clientConfig);

    String markStream = NameUtils.getMarkStreamForStream(streamName);
    RevisionedStreamClient<Watermark> watermarkReader = syncClientFactory.createRevisionedStreamClient(markStream,
            new WatermarkSerializer(),
            SynchronizerConfig.builder().build());

    LinkedBlockingQueue<Watermark> watermarks = new LinkedBlockingQueue<>();
    AtomicBoolean stopFlag = new AtomicBoolean(false);
    fetchWatermarks(watermarkReader, watermarks, stopFlag);

    // create two writers, write an event with each, and call noteTime for each writer.
    @Cleanup
    EventStreamWriter<String> writer1 = clientFactory.createEventWriter(streamName,
            new JavaSerializer<>(),
            EventWriterConfig.builder().build());
    writer1.writeEvent("1").get();
    writer1.noteTime(100L);

    @Cleanup
    EventStreamWriter<String> writer2 = clientFactory.createEventWriter(streamName,
            new JavaSerializer<>(),
            EventWriterConfig.builder().build());
    writer2.writeEvent("2").get();
    writer2.noteTime(102L);

    // writer1 and writer2 should result in two watermarks with the following times:
    // 1: 100L-101L 2: 101-101
    // then the first writer should time out and be discarded. But the second writer should continue to be
    // active as its time is higher than the first watermark. This should result in a second watermark being emitted.
    AssertExtensions.assertEventuallyEquals(true, () -> watermarks.size() == 2, 100000);
    Watermark watermark1 = watermarks.poll();
    Watermark watermark2 = watermarks.poll();
    assertEquals(100L, watermark1.getLowerTimeBound());
    assertEquals(102L, watermark1.getUpperTimeBound());
    assertEquals(102L, watermark2.getLowerTimeBound());
    assertEquals(102L, watermark2.getUpperTimeBound());

    // the stream cuts should be the same
    assertTrue(watermark2.getStreamCut().entrySet().stream()
                         .allMatch(x -> watermark1.getStreamCut().get(x.getKey()).equals(x.getValue())));

    // bring back writer1 and post an event with a noted time smaller than the current watermark
    writer1.writeEvent("3").get();
    writer1.noteTime(101L);

    // no watermark should be emitted.
    Watermark nullMark = watermarks.poll(10, TimeUnit.SECONDS);
    assertNull(nullMark);
}
Example 13
Source File: PravegaTeraSortOutputFormat.java From pravega-samples with Apache License 2.0
@Override
public RecordWriter<String, V> getRecordWriter(TaskAttemptContext context) throws IOException {
    Configuration conf = context.getConfiguration();
    final String scopeName = Optional.ofNullable(conf.get(OUTPUT_SCOPE_NAME))
            .orElseThrow(() -> new IOException("The output scope name must be configured (" + OUTPUT_SCOPE_NAME + ")"));
    final String outputStreamPrefix = Optional.ofNullable(conf.get(OUTPUT_STREAM_PREFIX))
            .orElseThrow(() -> new IOException("The output stream prefix must be configured (" + OUTPUT_STREAM_PREFIX + ")"));
    final URI controllerURI = Optional.ofNullable(conf.get(OUTPUT_URI_STRING)).map(URI::create)
            .orElseThrow(() -> new IOException("The Pravega controller URI must be configured (" + OUTPUT_URI_STRING + ")"));
    final String deserializerClassName = Optional.ofNullable(conf.get(OUTPUT_DESERIALIZER))
            .orElseThrow(() -> new IOException("The event deserializer must be configured (" + OUTPUT_DESERIALIZER + ")"));

    final String outputStreamName = outputStreamPrefix + context.getTaskAttemptID().getTaskID().getId();
    StreamManager streamManager = StreamManager.create(controllerURI);
    streamManager.createScope(scopeName);

    StreamConfiguration streamConfig = StreamConfiguration.builder()
            .scalingPolicy(ScalingPolicy.fixed(1))
            .build();
    streamManager.createStream(scopeName, outputStreamName, streamConfig);
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scopeName,
            ClientConfig.builder().controllerURI(controllerURI).build());

    Serializer<V> deserializer;
    try {
        Class<?> deserializerClass = Class.forName(deserializerClassName);
        deserializer = (Serializer<V>) deserializerClass.newInstance();
    } catch (ClassNotFoundException | InstantiationException | IllegalAccessException e) {
        log.error("Exception when creating deserializer: {}", e);
        throw new IOException(
                "Unable to create the event deserializer (" + deserializerClassName + ")", e);
    }

    EventStreamWriter<V> writer = clientFactory.createEventWriter(outputStreamName, deserializer,
            EventWriterConfig.builder().build());
    return new PravegaOutputRecordWriter<V>(writer);
}
Example 14
Source File: UnreadBytesTest.java From pravega with Apache License 2.0
@Test(timeout = 50000)
public void testUnreadBytesWithEndStreamCuts() throws Exception {
    StreamConfiguration config = StreamConfiguration.builder()
                                                    .scalingPolicy(ScalingPolicy.byEventRate(10, 2, 1))
                                                    .build();
    Controller controller = controllerWrapper.getController();
    controllerWrapper.getControllerService().createScope("unreadbytes").get();
    controller.createStream("unreadbytes", "unreadbytes", config).get();

    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope("unreadbytes",
            ClientConfig.builder().controllerURI(controllerUri).build());
    @Cleanup
    EventStreamWriter<String> writer = clientFactory.createEventWriter("unreadbytes", new JavaSerializer<>(),
            EventWriterConfig.builder().build());
    // Write just 2 events to simplify simulating a checkpoint.
    writer.writeEvent("0", "data of size 30").get();
    writer.writeEvent("0", "data of size 30").get();

    @Cleanup
    ReaderGroupManager groupManager = ReaderGroupManager.withScope("unreadbytes",
            ClientConfig.builder().controllerURI(controllerUri).build());
    // create a bounded reader group.
    groupManager.createReaderGroup("group", ReaderGroupConfig
            .builder().disableAutomaticCheckpoints()
            .stream("unreadbytes/unreadbytes", StreamCut.UNBOUNDED, getStreamCut("unreadbytes", 90L, 0))
            .build());

    ReaderGroup readerGroup = groupManager.getReaderGroup("group");
    @Cleanup
    EventStreamReader<String> reader = clientFactory.createReader("readerId", "group", new JavaSerializer<>(),
            ReaderConfig.builder().build());

    EventRead<String> firstEvent = reader.readNextEvent(15000);
    EventRead<String> secondEvent = reader.readNextEvent(15000);
    assertNotNull(firstEvent);
    assertEquals("data of size 30", firstEvent.getEvent());
    assertNotNull(secondEvent);
    assertEquals("data of size 30", secondEvent.getEvent());

    // trigger a checkpoint.
    CompletableFuture<Checkpoint> chkPointResult = readerGroup.initiateCheckpoint("test", executor);
    EventRead<String> chkpointEvent = reader.readNextEvent(15000);
    assertEquals("test", chkpointEvent.getCheckpointName());

    EventRead<String> emptyEvent = reader.readNextEvent(100);
    assertEquals(false, emptyEvent.isCheckpoint());
    assertEquals(null, emptyEvent.getEvent());
    chkPointResult.join();

    // Write more events, to ensure 120 bytes are written in total.
    writer.writeEvent("0", "data of size 30").get();
    writer.writeEvent("0", "data of size 30").get();

    long unreadBytes = readerGroup.getMetrics().unreadBytes();
    // Ensure the end offset of 90 bytes is taken into consideration when computing unread bytes.
    assertTrue("Unread bytes: " + unreadBytes, unreadBytes == 30);
}
Example 15
Source File: StreamRecreationTest.java From pravega with Apache License 2.0
@Test(timeout = 40000)
@SuppressWarnings("deprecation")
public void testStreamRecreation() throws Exception {
    final String myScope = "myScope";
    final String myStream = "myStream";
    final String myReaderGroup = "myReaderGroup";
    final int numIterations = 10;

    // Create the scope and the stream.
    @Cleanup
    StreamManager streamManager = StreamManager.create(controllerURI);
    streamManager.createScope(myScope);
    @Cleanup
    ReaderGroupManager readerGroupManager = ReaderGroupManager.withScope(myScope, controllerURI);
    final ReaderGroupConfig readerGroupConfig = ReaderGroupConfig.builder()
                                                                 .stream(Stream.of(myScope, myStream))
                                                                 .build();

    for (int i = 0; i < numIterations; i++) {
        log.info("Stream re-creation iteration {}.", i);
        final String eventContent = "myEvent" + String.valueOf(i);
        StreamConfiguration streamConfiguration = StreamConfiguration.builder()
                                                                     .scalingPolicy(ScalingPolicy.fixed(i + 1))
                                                                     .build();
        EventWriterConfig eventWriterConfig = EventWriterConfig.builder().build();
        streamManager.createStream(myScope, myStream, streamConfiguration);

        // Write a single event.
        @Cleanup
        EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(myScope,
                ClientConfig.builder().controllerURI(controllerURI).build());
        EventStreamWriter<String> writer = clientFactory.createEventWriter(myStream, new JavaSerializer<>(),
                eventWriterConfig);
        TransactionalEventStreamWriter<String> txnWriter = clientFactory.createTransactionalEventWriter(myStream,
                new JavaSerializer<>(), eventWriterConfig);

        // Write events regularly and with transactions.
        if (i % 2 == 0) {
            writer.writeEvent(eventContent).join();
        } else {
            Transaction<String> myTransaction = txnWriter.beginTxn();
            myTransaction.writeEvent(eventContent);
            myTransaction.commit();
            while (myTransaction.checkStatus() != Transaction.Status.COMMITTED) {
                Exceptions.handleInterrupted(() -> Thread.sleep(100));
            }
        }
        writer.close();

        // Read the event.
        readerGroupManager.createReaderGroup(myReaderGroup, readerGroupConfig);
        readerGroupManager.getReaderGroup(myReaderGroup).resetReaderGroup(readerGroupConfig);
        @Cleanup
        EventStreamReader<String> reader = clientFactory.createReader("myReader", myReaderGroup,
                new JavaSerializer<>(), ReaderConfig.builder().build());
        String readResult;
        do {
            readResult = reader.readNextEvent(1000).getEvent();
        } while (readResult == null);
        assertEquals("Wrong event read in re-created stream", eventContent, readResult);

        // Delete the stream.
        StreamInfo streamInfo = streamManager.getStreamInfo(myScope, myStream);
        assertFalse(streamInfo.isSealed());
        assertTrue("Unable to seal re-created stream.", streamManager.sealStream(myScope, myStream));
        streamInfo = streamManager.getStreamInfo(myScope, myStream);
        assertTrue(streamInfo.isSealed());
        assertTrue("Unable to delete re-created stream.", streamManager.deleteStream(myScope, myStream));
    }
}
Example 16
Source File: BoundedStreamReaderTest.java From pravega with Apache License 2.0
@Test(timeout = 60000)
public void testBoundedStreamTest() throws Exception {
    createScope(SCOPE);
    createStream(STREAM1);
    createStream(STREAM2);
    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE,
            ClientConfig.builder().controllerURI(controllerUri).build());

    @Cleanup
    EventStreamWriter<String> writer1 = clientFactory.createEventWriter(STREAM1, serializer,
            EventWriterConfig.builder().build());
    // Prep the stream with data.
    // 1. Write events with event size of 30
    writer1.writeEvent(keyGenerator.get(), getEventData.apply(1)).get();
    writer1.writeEvent(keyGenerator.get(), getEventData.apply(2)).get();
    writer1.writeEvent(keyGenerator.get(), getEventData.apply(3)).get();
    writer1.writeEvent(keyGenerator.get(), getEventData.apply(4)).get();

    @Cleanup
    ReaderGroupManager groupManager = ReaderGroupManager.withScope(SCOPE, controllerUri);
    groupManager.createReaderGroup("group", ReaderGroupConfig
            .builder().disableAutomaticCheckpoints()
            .stream(Stream.of(SCOPE, STREAM1),
                    // startStreamCut points to the current HEAD of stream
                    StreamCut.UNBOUNDED,
                    // endStreamCut points to the offset after two events (i.e. 2 * 30 (event size) = 60)
                    getStreamCut(STREAM1, 60L, 0))
            .stream(Stream.of(SCOPE, STREAM2))
            .build());

    // Create a reader
    @Cleanup
    EventStreamReader<String> reader = clientFactory.createReader("readerId", "group", serializer,
            ReaderConfig.builder().build());

    // 2. Verify if the endStreamCut configuration is enforced.
    readAndVerify(reader, 1, 2);
    // The following read should not return events 3, 4 due to the endStreamCut configuration.
    Assert.assertNull("Null is expected", reader.readNextEvent(2000).getEvent());

    // 3. Write events to STREAM2.
    @Cleanup
    EventStreamWriter<String> writer2 = clientFactory.createEventWriter(STREAM2, serializer,
            EventWriterConfig.builder().build());
    writer2.writeEvent(keyGenerator.get(), getEventData.apply(5)).get();
    writer2.writeEvent(keyGenerator.get(), getEventData.apply(6)).get();

    // 4. Verify that events can be read from STREAM2. (Events from STREAM1 are not read since endStreamCut is reached).
    readAndVerify(reader, 5, 6);
    Assert.assertNull("Null is expected", reader.readNextEvent(2000).getEvent());
}
Example 17
Source File: BoundedStreamReaderTest.java From pravega with Apache License 2.0
@Test(timeout = 60000)
public void testBoundedStreamWithScaleTest() throws Exception {
    createScope(SCOPE);
    createStream(STREAM1);
    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE,
            ClientConfig.builder().controllerURI(controllerUri).build());

    @Cleanup
    EventStreamWriter<String> writer1 = clientFactory.createEventWriter(STREAM1, serializer,
            EventWriterConfig.builder().build());
    // Prep the stream with data.
    // 1. Write events with event size of 30
    writer1.writeEvent(keyGenerator.get(), getEventData.apply(1)).get();
    writer1.writeEvent(keyGenerator.get(), getEventData.apply(2)).get();

    // 2. Scale stream
    Map<Double, Double> newKeyRanges = new HashMap<>();
    newKeyRanges.put(0.0, 0.33);
    newKeyRanges.put(0.33, 0.66);
    newKeyRanges.put(0.66, 1.0);
    scaleStream(STREAM1, newKeyRanges);

    // 3. Write three events with event size of 30
    writer1.writeEvent(keyGenerator.get(), getEventData.apply(3)).get();
    writer1.writeEvent(keyGenerator.get(), getEventData.apply(4)).get();
    writer1.writeEvent(keyGenerator.get(), getEventData.apply(5)).get();

    @Cleanup
    ReaderGroupManager groupManager = ReaderGroupManager.withScope(SCOPE, controllerUri);
    ReaderGroupConfig readerGroupCfg1 = ReaderGroupConfig.builder().disableAutomaticCheckpoints().groupRefreshTimeMillis(0)
            .stream(Stream.of(SCOPE, STREAM1),
                    // startStreamCut points to the current HEAD of stream
                    StreamCut.UNBOUNDED,
                    // endStreamCut points to the offset after two events (i.e. 2 * 30 (event size) = 60)
                    getStreamCut(STREAM1, 60L, 0))
            .build();
    groupManager.createReaderGroup("group", readerGroupCfg1);
    ReaderGroup readerGroup = groupManager.getReaderGroup("group");

    // Create a reader
    @Cleanup
    EventStreamReader<String> reader1 = clientFactory.createReader("readerId1", "group", serializer,
            ReaderConfig.builder().build());

    // 2. Verify if the endStreamCut configuration is enforced.
    readAndVerify(reader1, 1, 2);
    // The following read should not return events 3, 4 due to the endStreamCut configuration.
    Assert.assertNull("Null is expected", reader1.readNextEvent(2000).getEvent());

    final ReaderGroupConfig readerGroupCfg2 = ReaderGroupConfig.builder().disableAutomaticCheckpoints().groupRefreshTimeMillis(0)
            .stream(Stream.of(SCOPE, STREAM1),
                    // the new startStreamCut is the previous endStreamCut (offset after two events, i.e. 2 * 30 = 60)
                    getStreamCut(STREAM1, 60L, 0),
                    getStreamCut(STREAM1, 90L, 1, 2, 3))
            .build();
    readerGroup.resetReaderGroup(readerGroupCfg2);

    verifyReinitializationRequiredException(reader1);

    // Create a reader
    @Cleanup
    EventStreamReader<String> reader2 = clientFactory.createReader("readerId2", "group", serializer,
            ReaderConfig.builder().build());
    assertNull(reader2.readNextEvent(100).getEvent());
    readerGroup.initiateCheckpoint("c1", executor);
    readAndVerify(reader2, 3, 4, 5);
    Assert.assertNull("Null is expected", reader2.readNextEvent(2000).getEvent());
}
Example 18
Source File: BoundedStreamReaderTest.java From pravega with Apache License 2.0
@Test(timeout = 60000)
public void testBoundedStreamWithTruncationTest() throws Exception {
    createScope(SCOPE);
    createStream(STREAM3);
    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE,
            ClientConfig.builder().controllerURI(controllerUri).build());

    @Cleanup
    EventStreamWriter<String> writer1 = clientFactory.createEventWriter(STREAM3, serializer,
            EventWriterConfig.builder().build());
    // Prep the stream with data.
    // 1. Write events with event size of 30
    writer1.writeEvent(keyGenerator.get(), getEventData.apply(1)).get();
    writer1.writeEvent(keyGenerator.get(), getEventData.apply(2)).get();
    writer1.writeEvent(keyGenerator.get(), getEventData.apply(3)).get();
    writer1.writeEvent(keyGenerator.get(), getEventData.apply(4)).get();

    @Cleanup
    ReaderGroupManager groupManager = ReaderGroupManager.withScope(SCOPE, controllerUri);
    StreamCut offset30SC = getStreamCut(STREAM3, 30L, 0); // StreamCut pointing to event 2.
    StreamCut offset60SC = getStreamCut(STREAM3, 60L, 0);
    groupManager.createReaderGroup("group", ReaderGroupConfig
            .builder().disableAutomaticCheckpoints()
            .stream(Stream.of(SCOPE, STREAM3),
                    // startStreamCut points to the second event in the stream.
                    offset30SC,
                    // endStreamCut points to the offset after two events (i.e. 2 * 30 (event size) = 60)
                    offset60SC)
            .build());
    final ReaderGroup rg = groupManager.getReaderGroup("group");

    // Create a reader
    @Cleanup
    EventStreamReader<String> reader = clientFactory.createReader("readerId", "group", serializer,
            ReaderConfig.builder().build());

    // 2. Verify if the endStreamCut configuration is enforced.
    readAndVerify(reader, 2);
    // The following read should not return events 3, 4 due to the endStreamCut configuration.
    Assert.assertNull("Null is expected", reader.readNextEvent(2000).getEvent());

    truncateStream(STREAM3, offset60SC);

    // Truncation should not affect the reader as it is already past the truncation point.
    Assert.assertNull("Null is expected", reader.readNextEvent(2000).getEvent());

    // Reset the reader group with a startStreamCut which is already truncated.
    rg.resetReaderGroup(ReaderGroupConfig.builder().disableAutomaticCheckpoints()
                                         .stream(Stream.of(SCOPE, STREAM3), offset30SC, StreamCut.UNBOUNDED)
                                         .build());

    verifyReinitializationRequiredException(reader);

    // Create a reader
    @Cleanup
    EventStreamReader<String> reader2 = clientFactory.createReader("readerId2", "group", serializer,
            ReaderConfig.builder().build());

    assertThrows(TruncatedDataException.class, () -> reader2.readNextEvent(10000));
    // A subsequent read returns the data present after truncation; event 3 is returned here since the stream
    // was truncated at offset 30 * 2.
    readAndVerify(reader2, 3);
}
Example 19
Source File: MultiSegmentStoreTest.java From pravega with Apache License 2.0
private void testReadWrite() {
    List<URI> ctlURIs = this.controllerInstance.getServiceDetails();
    URI controllerUri = ctlURIs.get(0);

    String scope = "testscope" + RandomStringUtils.randomAlphanumeric(10);
    String stream = "teststream" + RandomStringUtils.randomAlphanumeric(10);

    ClientConfig clientConfig = Utils.buildClientConfig(controllerUri);
    @Cleanup
    StreamManager streamManager = StreamManager.create(clientConfig);
    Assert.assertTrue(streamManager.createScope(scope));

    // Create stream with large number of segments so that most segment containers are used.
    Assert.assertTrue(streamManager.createStream(scope, stream,
            StreamConfiguration.builder()
                               .scalingPolicy(ScalingPolicy.fixed(10))
                               .build()));

    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope, clientConfig);

    log.info("Invoking writer with controller URI: {}", controllerUri);
    @Cleanup
    EventStreamWriter<Serializable> writer = clientFactory.createEventWriter(stream,
            new JavaSerializer<>(),
            EventWriterConfig.builder().build());
    final int numEvents = 1000;
    final String fixedEvent = "testevent";
    for (int i = 0; i < numEvents; i++) {
        log.debug("Producing event: {} ", fixedEvent);
        writer.writeEvent(String.valueOf(i), fixedEvent);
    }
    writer.flush();

    log.info("Invoking reader with controller URI: {}", controllerUri);
    final String readerGroup = "testreadergroup" + RandomStringUtils.randomAlphanumeric(10);
    ReaderGroupManager groupManager = ReaderGroupManager.withScope(scope, clientConfig);
    groupManager.createReaderGroup(readerGroup,
            ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream(Stream.of(scope, stream)).build());

    @Cleanup
    EventStreamReader<String> reader = clientFactory.createReader(UUID.randomUUID().toString(),
            readerGroup,
            new JavaSerializer<>(),
            ReaderConfig.builder().build());
    for (int i = 0; i < numEvents; i++) {
        try {
            String event = reader.readNextEvent(60000).getEvent();
            Assert.assertEquals(fixedEvent, event);
        } catch (ReinitializationRequiredException e) {
            log.error("Unexpected request to reinitialize {}", e);
            throw new IllegalStateException("Unexpected request to reinitialize");
        }
    }
}
Example 20
Source File: StreamSeekTest.java From pravega with Apache License 2.0
@Test(timeout = 50000)
public void testStreamSeek() throws Exception {
    createScope(SCOPE);
    createStream(STREAM1);
    createStream(STREAM2);

    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE,
            ClientConfig.builder().controllerURI(controllerUri).build());
    @Cleanup
    EventStreamWriter<String> writer1 = clientFactory.createEventWriter(STREAM1, serializer,
            EventWriterConfig.builder().build());

    @Cleanup
    ReaderGroupManager groupManager = ReaderGroupManager.withScope(SCOPE, controllerUri);
    groupManager.createReaderGroup("group", ReaderGroupConfig.builder()
                                                             .disableAutomaticCheckpoints()
                                                             .groupRefreshTimeMillis(0)
                                                             .stream(Stream.of(SCOPE, STREAM1))
                                                             .stream(Stream.of(SCOPE, STREAM2))
                                                             .build());
    @Cleanup
    ReaderGroup readerGroup = groupManager.getReaderGroup("group");

    // Prep the stream with data.
    // 1. Write two events with event size of 30
    writer1.writeEvent(keyGenerator.get(), getEventData.apply(1)).get();
    writer1.writeEvent(keyGenerator.get(), getEventData.apply(2)).get();

    // 2. Scale stream
    Map<Double, Double> newKeyRanges = new HashMap<>();
    newKeyRanges.put(0.0, 0.33);
    newKeyRanges.put(0.33, 0.66);
    newKeyRanges.put(0.66, 1.0);
    scaleStream(STREAM1, newKeyRanges);

    // 3. Write three events with event size of 30
    writer1.writeEvent(keyGenerator.get(), getEventData.apply(3)).get();
    writer1.writeEvent(keyGenerator.get(), getEventData.apply(4)).get();
    writer1.writeEvent(keyGenerator.get(), getEventData.apply(5)).get();

    // Create a reader
    @Cleanup
    EventStreamReader<String> reader = clientFactory.createReader("readerId", "group", serializer,
            ReaderConfig.builder().build());

    // The offset of a streamCut is always set to zero.
    Map<Stream, StreamCut> streamCut1 = readerGroup.getStreamCuts(); // Stream cut 1
    readAndVerify(reader, 1, 2);
    assertNull(reader.readNextEvent(100).getEvent()); // Sees the segments are empty prior to scaling
    readerGroup.initiateCheckpoint("cp1", executor); // Checkpoint to move past the scale
    readAndVerify(reader, 3, 4, 5); // Old segments are released and new ones can be read
    Map<Stream, StreamCut> streamCut2 = readerGroup.getStreamCuts(); // Stream cut 2

    readerGroup.resetReaderGroup(ReaderGroupConfig.builder().startFromStreamCuts(streamCut1).build()); // reset the readers to offset 0.
    verifyReinitializationRequiredException(reader);

    @Cleanup
    EventStreamReader<String> reader1 = clientFactory.createReader("readerId", "group", serializer,
            ReaderConfig.builder().build());

    // Verify that we are at streamCut1
    readAndVerify(reader1, 1, 2);

    readerGroup.resetReaderGroup(ReaderGroupConfig.builder().startFromStreamCuts(streamCut2).build()); // reset readers to the post-scale offset 0
    verifyReinitializationRequiredException(reader1);

    @Cleanup
    EventStreamReader<String> reader2 = clientFactory.createReader("readerId", "group", serializer,
            ReaderConfig.builder().build());

    // Verify that we are at streamCut2
    readAndVerify(reader2, 3, 4, 5);
}