io.pravega.client.stream.EventRead Java Examples
The following examples show how to use io.pravega.client.stream.EventRead. Each example notes the project and license of the source file it is taken from.
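Before the project-specific examples, here is a minimal, self-contained sketch of the typical EventRead consumption loop. It assumes a hypothetical scope "myScope", stream "myStream", reader group "myGroup", and a controller at tcp://localhost:9090, all of which must already exist; the same pattern (checking isCheckpoint(), getEvent(), and optionally keeping getEventPointer()) recurs throughout the examples below.

import io.pravega.client.ClientConfig;
import io.pravega.client.EventStreamClientFactory;
import io.pravega.client.admin.ReaderGroupManager;
import io.pravega.client.stream.EventRead;
import io.pravega.client.stream.EventStreamReader;
import io.pravega.client.stream.ReaderConfig;
import io.pravega.client.stream.ReaderGroupConfig;
import io.pravega.client.stream.ReinitializationRequiredException;
import io.pravega.client.stream.Stream;
import io.pravega.client.stream.impl.JavaSerializer;

import java.net.URI;

public class EventReadSketch {
    public static void main(String[] args) {
        // Hypothetical endpoint and names; adjust to your deployment.
        ClientConfig clientConfig = ClientConfig.builder()
                .controllerURI(URI.create("tcp://localhost:9090"))
                .build();
        try (ReaderGroupManager groupManager = ReaderGroupManager.withScope("myScope", clientConfig)) {
            groupManager.createReaderGroup("myGroup", ReaderGroupConfig.builder()
                    .stream(Stream.of("myScope", "myStream"))
                    .build());
        }
        try (EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope("myScope", clientConfig);
             EventStreamReader<String> reader = clientFactory.createReader("readerId", "myGroup",
                     new JavaSerializer<>(), ReaderConfig.builder().build())) {
            EventRead<String> eventRead = null;
            do {
                try {
                    eventRead = reader.readNextEvent(2000); // timeout in milliseconds
                    if (eventRead.isCheckpoint()) {
                        // A checkpoint marker, not data; getEvent() is null here.
                        System.out.println("Checkpoint: " + eventRead.getCheckpointName());
                    } else if (eventRead.getEvent() != null) {
                        // A data event; getEventPointer() could be stored to re-read it later via fetchEvent().
                        System.out.println("Event: " + eventRead.getEvent());
                    }
                } catch (ReinitializationRequiredException e) {
                    // The reader is no longer part of the group; in a real application, recreate it.
                    break;
                }
            } while (eventRead.isCheckpoint() || eventRead.getEvent() != null);
        }
    }
}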
Example #1
Source File: FlinkPravegaReaderRGStateITCase.java (from flink-connectors, Apache License 2.0)

@Override
public T map(T value) throws Exception {
    if (this.sideStreamReader == null) {
        this.sideStreamReader = getIntegerReader();
    }
    EventRead<Integer> rule = sideStreamReader.readNextEvent(50);
    if (rule.getEvent() != null) {
        log.info("Mapper: received side stream event: {}", rule.getEvent());
        /*
         * Event == 1: continue processing original events.
         * Event == 2: trigger an exception (simulate failure), reset the writer thread and start processing all the records.
         */
        if (rule.getEvent() == 2) {
            RESUME_WRITE_HANDLER.get().run();
            throw new IntentionalException("artificial test failure");
        }
    }
    return value;
}
Example #2
Source File: ReaderCheckpointTest.java (from pravega, Apache License 2.0)

private Map<Stream, StreamCut> generateStreamCuts(final ReaderGroup readerGroup) {
    log.info("Generate StreamCuts");
    String readerId = "streamCut";
    CompletableFuture<Map<io.pravega.client.stream.Stream, StreamCut>> streamCuts = null;

    final ClientConfig clientConfig = Utils.buildClientConfig(controllerURI);
    try (EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE_2, clientConfig);
         EventStreamReader<Integer> reader = clientFactory.createReader(readerId, READER_GROUP_NAME,
                 new JavaSerializer<Integer>(), readerConfig)) {

        streamCuts = readerGroup.generateStreamCuts(executor); //create checkpoint

        Exceptions.handleInterrupted(() -> TimeUnit.MILLISECONDS.sleep(GROUP_REFRESH_TIME_MILLIS)); // sleep for group refresh.
        //read the next event, this causes the reader to update its latest offset.
        EventRead<Integer> event = reader.readNextEvent(READ_TIMEOUT);
        assertTrue("No events expected as all events are read",
                (event.getEvent() == null) && (!event.isCheckpoint()));
        Futures.exceptionListener(streamCuts, t -> log.error("StreamCut generation failed", t));
        assertTrue("Stream cut generation should be completed", Futures.await(streamCuts));
    } catch (ReinitializationRequiredException e) {
        log.error("Exception while reading event using readerId: {}", readerId, e);
        fail("Reinitialization Exception is not expected");
    }
    return streamCuts.join();
}
Example #3
Source File: ReaderCheckpointTest.java (from pravega, Apache License 2.0)

private Checkpoint createCheckPointAndVerify(final ReaderGroup readerGroup, final String checkPointName) {
    log.info("Create and verify check point {}", checkPointName);
    String readerId = "checkPointReader";
    CompletableFuture<Checkpoint> checkpoint = null;

    final ClientConfig clientConfig = Utils.buildClientConfig(controllerURI);
    try (EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE_1, clientConfig);
         EventStreamReader<Integer> reader = clientFactory.createReader(readerId, READER_GROUP_NAME,
                 new JavaSerializer<Integer>(), readerConfig)) {

        checkpoint = readerGroup.initiateCheckpoint(checkPointName, executor); //create checkpoint

        //verify checkpoint event.
        EventRead<Integer> event = reader.readNextEvent(READ_TIMEOUT);
        assertTrue("Read for Checkpoint event", (event != null) && (event.isCheckpoint()));
        assertEquals("CheckPoint Name", checkPointName, event.getCheckpointName());
    }
    return checkpoint.join();
}
Example #4
Source File: SingleThreadEndToEndTest.java (from pravega, Apache License 2.0)

@Test(timeout = 30000)
public void testReadWrite() throws Exception {
    @Cleanup("stopAllServices")
    SetupUtils setupUtils = new SetupUtils();
    setupUtils.startAllServices(1);
    setupUtils.createTestStream("stream", 1);
    @Cleanup
    EventStreamWriter<Integer> writer = setupUtils.getIntegerWriter("stream");
    writer.writeEvent(1);
    writer.flush();
    @Cleanup
    EventStreamReader<Integer> reader = setupUtils.getIntegerReader("stream");
    EventRead<Integer> event = reader.readNextEvent(100);
    Assert.assertEquals(1, (int) event.getEvent());
}
Example #5
Source File: ClientReader.java (from pravega, Apache License 2.0)

@SneakyThrows
private void readNextItem(Consumer<ReadItem> eventHandler) {
    int readAttempts = MAX_READ_ATTEMPTS;
    long timeoutMillis = ClientReader.this.testConfig.getTimeout().toMillis();
    while (readAttempts-- > 0) {
        EventRead<byte[]> readResult = READ_RETRY.run(() -> getReader().readNextEvent(timeoutMillis));
        if (readResult.getEvent() == null && readAttempts > 0) {
            // EventStreamReader.readNextEvent() will return null if we get no new events within the given timeout.
            // Retry the read up to the maximum allowed number of times before giving up.
            Thread.sleep(timeoutMillis / MAX_READ_ATTEMPTS);
        } else if (readResult.getEvent() == null) {
            // We are done.
            close();
            return;
        } else {
            StreamReadItem readItem = toReadItem(readResult.getEvent(), readResult.getEventPointer());
            eventHandler.accept(readItem);
            return;
        }
    }
}
Example #6
Source File: EventStreamReaderTest.java (from pravega, Apache License 2.0)

@Test(timeout = 10000)
public void testCheckpoint() throws SegmentSealedException, ReaderNotInReaderGroupException {
    AtomicLong clock = new AtomicLong();
    MockSegmentStreamFactory segmentStreamFactory = new MockSegmentStreamFactory();
    Orderer orderer = new Orderer();
    ReaderGroupStateManager groupState = Mockito.mock(ReaderGroupStateManager.class);
    EventStreamReaderImpl<byte[]> reader = new EventStreamReaderImpl<>(segmentStreamFactory, segmentStreamFactory,
            new ByteArraySerializer(), groupState, orderer, clock::get, ReaderConfig.builder().build(),
            createWatermarkReaders(), Mockito.mock(Controller.class));
    Segment segment = Segment.fromScopedName("Foo/Bar/0");
    Mockito.when(groupState.acquireNewSegmentsIfNeeded(eq(0L), any()))
           .thenReturn(ImmutableMap.of(new SegmentWithRange(segment, 0, 1), 0L))
           .thenReturn(Collections.emptyMap());
    Mockito.when(groupState.getEndOffsetForSegment(any(Segment.class))).thenReturn(Long.MAX_VALUE);
    SegmentOutputStream stream = segmentStreamFactory.createOutputStreamForSegment(segment, segmentSealedCallback,
            writerConfig, DelegationTokenProviderFactory.createWithEmptyToken());
    ByteBuffer buffer = writeInt(stream, 1);
    Mockito.when(groupState.getCheckpoint()).thenReturn("Foo").thenReturn(null);
    EventRead<byte[]> eventRead = reader.readNextEvent(0);
    assertTrue(eventRead.isCheckpoint());
    assertNull(eventRead.getEvent());
    assertEquals("Foo", eventRead.getCheckpointName());
    InOrder order = Mockito.inOrder(groupState);
    order.verify(groupState).getCheckpoint();
    order.verify(groupState, Mockito.never()).checkpoint(Mockito.anyString(), Mockito.any());
    assertEquals(buffer, ByteBuffer.wrap(reader.readNextEvent(0).getEvent()));
    assertNull(reader.readNextEvent(0).getEvent());
    order.verify(groupState).checkpoint(Mockito.eq("Foo"), Mockito.any());
    order.verify(groupState).getCheckpoint();
    reader.close();
}
Example #7
Source File: EventStreamReaderImpl.java (from pravega, Apache License 2.0)

@Override
public EventRead<Type> readNextEvent(long timeoutMillis) throws ReinitializationRequiredException, TruncatedDataException {
    synchronized (readers) {
        Preconditions.checkState(!closed, "Reader is closed");
        try {
            return readNextEventInternal(timeoutMillis);
        } catch (ReaderNotInReaderGroupException e) {
            close();
            throw new ReinitializationRequiredException(e);
        }
    }
}
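Since readNextEvent() surfaces reader-group membership problems as ReinitializationRequiredException (as shown above), a caller that wants to keep reading typically closes the failed reader and continues with a fresh one. Below is a rough, hedged sketch of that pattern; the readerFactory and handle callbacks are hypothetical application hooks, not Pravega APIs, and only readNextEvent(), getEvent(), isCheckpoint() and close() come from the client library.

import io.pravega.client.stream.EventRead;
import io.pravega.client.stream.EventStreamReader;
import io.pravega.client.stream.ReinitializationRequiredException;

import java.util.function.Consumer;
import java.util.function.Supplier;

public class ReinitializingReadLoop {
    // readerFactory builds a fresh EventStreamReader; handle processes one event (both hypothetical).
    public static void read(Supplier<EventStreamReader<String>> readerFactory, Consumer<String> handle) {
        EventStreamReader<String> reader = readerFactory.get();
        try {
            EventRead<String> eventRead;
            do {
                try {
                    eventRead = reader.readNextEvent(2000);
                    if (eventRead.getEvent() != null) {
                        handle.accept(eventRead.getEvent());
                    }
                } catch (ReinitializationRequiredException e) {
                    // The reader is no longer part of its group (e.g. the group was reset);
                    // close it and carry on with a brand-new reader instance.
                    reader.close();
                    reader = readerFactory.get();
                    eventRead = null; // force another iteration with the new reader
                }
            } while (eventRead == null || eventRead.getEvent() != null || eventRead.isCheckpoint());
        } finally {
            reader.close();
        }
    }
}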
Example #8
Source File: ConsoleReader.java (from pravega-samples, Apache License 2.0)

/**
 * This method continuously performs two tasks: first, it reads events that are being written by console writer
 * or by any other process in that stream. Second, it creates a new StreamCut after every read event. The new
 * {@link StreamCut} represents the current tail of the {@link Stream} and it may be used to read events to or from
 * that position in the {@link Stream}.
 */
public void run() {
    final ReaderGroupConfig readerGroupConfig = ReaderGroupConfig.builder().disableAutomaticCheckpoints()
                                                                 .stream(Stream.of(scope, streamName)).build();

    try (ReaderGroupManager readerGroupManager = ReaderGroupManager.withScope(scope, controllerURI);
         EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope,
                 ClientConfig.builder().controllerURI(controllerURI).build())) {

        // Create the ReaderGroup to which readers will belong to.
        readerGroupManager.createReaderGroup(readerGroupName, readerGroupConfig);
        @Cleanup
        ReaderGroup readerGroup = readerGroupManager.getReaderGroup(readerGroupName);

        EventStreamReader<String> reader = clientFactory.createReader("backgroundReader", readerGroupName,
                new JavaSerializer<>(), ReaderConfig.builder().build());
        EventRead<String> event;

        // Start main loop to continuously read and display events written to the scope/stream.
        log.info("Start reading events from {}/{}.", scope, streamName);
        do {
            event = reader.readNextEvent(READER_TIMEOUT_MS);
            if (event.getEvent() != null) {
                // TODO: Problem finding logback.xml in Pravega example applications (Issue #87).
                System.out.println("[BackgroundReader] Read event: " + event.getEvent());
                log.info("[BackgroundReader] Read event: {}.", event.getEvent());
            }

            // Update the StreamCut after every event read, just in case the user wants to use it.
            if (!event.isCheckpoint()) {
                readerGroup.initiateCheckpoint("myCheckpoint" + System.nanoTime(), executor)
                           .thenAccept(checkpoint -> lastStreamCut.set(checkpoint.asImpl().getPositions()));
            }
        } while (!end.get());
    } catch (ReinitializationRequiredException e) {
        // We do not expect this Exception from the reader in this situation, so we leave.
        log.error("Non-expected reader re-initialization.");
    }
}
Example #9
Source File: SimpleReader.java (from pravega-samples, Apache License 2.0)

public void run() {
    setRunning(true);
    final String readerGroup = UUID.randomUUID().toString().replace("-", "");
    final ReaderGroupConfig readerGroupConfig = ReaderGroupConfig.builder()
            .stream(Stream.of(scope, streamName))
            .build();
    try (ReaderGroupManager readerGroupManager = ReaderGroupManager.withScope(scope, controllerURI)) {
        readerGroupManager.createReaderGroup(readerGroup, readerGroupConfig);
    }

    try (EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope,
            ClientConfig.builder().controllerURI(controllerURI).build());
         EventStreamReader<T> reader = clientFactory.createReader("reader", readerGroup, serializer,
                 ReaderConfig.builder().build())) {
        while (isRunning()) {
            try {
                EventRead<T> event = reader.readNextEvent(READER_TIMEOUT_MS);
                T eventData = event.getEvent();
                if (eventData != null) {
                    onNext.accept(event.getEvent());
                }
            } catch (ReinitializationRequiredException e) {
                onError.accept(e);
            }
        }
    }
}
Example #10
Source File: StreamCutsExample.java (from pravega-samples, Apache License 2.0)

/**
 * This method is an example of bounded processing in Pravega with {@link StreamCut}s. {@link ReaderGroupConfig}
 * contains the information related to the {@link Stream}s to be read as well as the (optional) user-defined
 * boundaries in the form of {@link StreamCut}s that will limit the events to be read by reader processes. Note that
 * event readers (i.e., {@link EventStreamReader}) are agnostic to any notion of boundaries and they do not interact
 * with {@link StreamCut}s; they only consume events, which will be bounded within specific {@link Stream} slices as
 * configured in {@link ReaderGroupConfig}. The method basically creates a string representation of the events read
 * from {@link Stream}s within the bounds defined in the configuration parameter.
 *
 * @param config Configuration for the {@link ReaderGroup}, possibly containing {@link StreamCut} boundaries for
 *               limiting the number of events to read.
 * @return String representation of the events read by the reader.
 */
public String printBoundedStreams(ReaderGroupConfig config) {
    StringBuilder result = new StringBuilder();
    final String randomId = String.valueOf(new Random(System.nanoTime()).nextInt());
    try (ReaderGroupManager manager = ReaderGroupManager.withScope(scope, controllerURI);
         EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope,
                 ClientConfig.builder().controllerURI(controllerURI).build())) {
        final String readerGroupName = "RG" + randomId;
        manager.createReaderGroup(readerGroupName, config);
        @Cleanup
        EventStreamReader<String> reader = clientFactory.createReader(randomId, readerGroupName,
                new JavaSerializer<>(), ReaderConfig.builder().build());

        // Write dummy events that identify each Stream.
        EventRead<String> event;
        do {
            event = reader.readNextEvent(1000);
            if (event.getEvent() != null) {
                result = result.append(event.getEvent()).append('|');
            }
        } while (event.isCheckpoint() || event.getEvent() != null);

        result = result.append('\n');
    } catch (ReinitializationRequiredException e) {
        // We do not expect this Exception from the reader in this situation, so we leave.
        log.error("Non-expected reader re-initialization.");
    }
    return result.toString();
}
Example #11
Source File: HelloWorldReader.java (from pravega-samples, Apache License 2.0)

public void run() {
    StreamManager streamManager = StreamManager.create(controllerURI);

    final boolean scopeIsNew = streamManager.createScope(scope);
    StreamConfiguration streamConfig = StreamConfiguration.builder()
            .scalingPolicy(ScalingPolicy.fixed(1))
            .build();
    final boolean streamIsNew = streamManager.createStream(scope, streamName, streamConfig);

    final String readerGroup = UUID.randomUUID().toString().replace("-", "");
    final ReaderGroupConfig readerGroupConfig = ReaderGroupConfig.builder()
            .stream(Stream.of(scope, streamName))
            .build();
    try (ReaderGroupManager readerGroupManager = ReaderGroupManager.withScope(scope, controllerURI)) {
        readerGroupManager.createReaderGroup(readerGroup, readerGroupConfig);
    }

    try (EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope,
            ClientConfig.builder().controllerURI(controllerURI).build());
         EventStreamReader<String> reader = clientFactory.createReader("reader", readerGroup,
                 new JavaSerializer<String>(), ReaderConfig.builder().build())) {
        System.out.format("Reading all the events from %s/%s%n", scope, streamName);
        EventRead<String> event = null;
        do {
            try {
                event = reader.readNextEvent(READER_TIMEOUT_MS);
                if (event.getEvent() != null) {
                    System.out.format("Read event '%s'%n", event.getEvent());
                }
            } catch (ReinitializationRequiredException e) {
                //There are certain circumstances where the reader needs to be reinitialized
                e.printStackTrace();
            }
        } while (event.getEvent() != null);
        System.out.format("No more events from %s/%s%n", scope, streamName);
    }
}
Example #12
Source File: FlinkPravegaReaderTest.java (from flink-connectors, Apache License 2.0)

@Override
public IntegerWithEventPointer extractEvent(EventRead<IntegerWithEventPointer> eventRead) {
    if (!includeMetadata) {
        return super.extractEvent(eventRead);
    }

    IntegerWithEventPointer event = eventRead.getEvent();
    event.setEventPointer(eventRead.getEventPointer());
    return event;
}
Example #13
Source File: EventStreamReaderTest.java (from pravega, Apache License 2.0)

@Test(timeout = 10000)
public void testEndOfSegmentWithoutSuccessors() throws SegmentSealedException, ReaderNotInReaderGroupException {
    AtomicLong clock = new AtomicLong();
    MockSegmentStreamFactory segmentStreamFactory = new MockSegmentStreamFactory();
    Orderer orderer = new Orderer();
    ReaderGroupStateManager groupState = Mockito.mock(ReaderGroupStateManager.class);
    EventStreamReaderImpl<byte[]> reader = new EventStreamReaderImpl<>(segmentStreamFactory, segmentStreamFactory,
            new ByteArraySerializer(), groupState, orderer, clock::get, ReaderConfig.builder().build(),
            createWatermarkReaders(), Mockito.mock(Controller.class));
    Segment segment = Segment.fromScopedName("Foo/Bar/0");
    Mockito.when(groupState.acquireNewSegmentsIfNeeded(eq(0L), any()))
           .thenReturn(ImmutableMap.of(new SegmentWithRange(segment, 0, 1), 0L))
           .thenReturn(Collections.emptyMap());
    Mockito.when(groupState.getEndOffsetForSegment(any(Segment.class))).thenReturn(Long.MAX_VALUE);
    Mockito.when(groupState.handleEndOfSegment(any())).thenReturn(true);
    SegmentOutputStream stream = segmentStreamFactory.createOutputStreamForSegment(segment, segmentSealedCallback,
            writerConfig, DelegationTokenProviderFactory.createWithEmptyToken());
    ByteBuffer buffer = writeInt(stream, 1);
    EventRead<byte[]> read = reader.readNextEvent(0);
    byte[] event = read.getEvent();
    assertEquals(buffer, ByteBuffer.wrap(event));
    read = reader.readNextEvent(0);
    assertNull(read.getEvent());
    read = reader.readNextEvent(0);
    assertNull(read.getEvent());
    assertEquals(0, reader.getReaders().size());
    assertEquals(1, reader.getRanges().size());
    Mockito.when(groupState.getCheckpoint()).thenReturn("CP1");
    read = reader.readNextEvent(0);
    assertTrue(read.isCheckpoint());
    read = reader.readNextEvent(0);
    assertNull(read.getEvent());
    assertEquals(0, reader.getRanges().size());
    reader.close();
}
Example #14
Source File: EventStreamReaderTest.java (from pravega, Apache License 2.0)

@Test(timeout = 10000)
public void testRead() throws SegmentSealedException, ReaderNotInReaderGroupException {
    AtomicLong clock = new AtomicLong();
    MockSegmentStreamFactory segmentStreamFactory = new MockSegmentStreamFactory();
    Orderer orderer = new Orderer();
    ReaderGroupStateManager groupState = Mockito.mock(ReaderGroupStateManager.class);
    EventStreamReaderImpl<byte[]> reader = new EventStreamReaderImpl<>(segmentStreamFactory, segmentStreamFactory,
            new ByteArraySerializer(), groupState, orderer, clock::get, ReaderConfig.builder().build(),
            createWatermarkReaders(), Mockito.mock(Controller.class));
    SegmentWithRange segment = new SegmentWithRange(Segment.fromScopedName("Foo/Bar/0"), 0, 1);
    Mockito.when(groupState.acquireNewSegmentsIfNeeded(eq(0L), any()))
           .thenReturn(ImmutableMap.of(segment, 0L))
           .thenReturn(Collections.emptyMap());
    Mockito.when(groupState.getEndOffsetForSegment(any(Segment.class))).thenReturn(Long.MAX_VALUE);
    SegmentOutputStream stream = segmentStreamFactory.createOutputStreamForSegment(segment.getSegment(),
            segmentSealedCallback, writerConfig, DelegationTokenProviderFactory.createWithEmptyToken());
    ByteBuffer buffer1 = writeInt(stream, 1);
    ByteBuffer buffer2 = writeInt(stream, 2);
    ByteBuffer buffer3 = writeInt(stream, 3);
    EventRead<byte[]> e = reader.readNextEvent(0);
    assertEquals(buffer1, ByteBuffer.wrap(e.getEvent()));
    assertEquals(new Long(WireCommands.TYPE_PLUS_LENGTH_SIZE + Integer.BYTES),
                 e.getPosition().asImpl().getOffsetForOwnedSegment(Segment.fromScopedName("Foo/Bar/0")));
    e = reader.readNextEvent(0);
    assertEquals(buffer2, ByteBuffer.wrap(e.getEvent()));
    assertEquals(new Long(2 * (WireCommands.TYPE_PLUS_LENGTH_SIZE + Integer.BYTES)),
                 e.getPosition().asImpl().getOffsetForOwnedSegment(Segment.fromScopedName("Foo/Bar/0")));
    e = reader.readNextEvent(0);
    assertEquals(buffer3, ByteBuffer.wrap(e.getEvent()));
    assertEquals(new Long(3 * (WireCommands.TYPE_PLUS_LENGTH_SIZE + Integer.BYTES)),
                 e.getPosition().asImpl().getOffsetForOwnedSegment(Segment.fromScopedName("Foo/Bar/0")));
    e = reader.readNextEvent(0);
    assertNull(e.getEvent());
    assertEquals(new Long(-1),
                 e.getPosition().asImpl().getOffsetForOwnedSegment(Segment.fromScopedName("Foo/Bar/0")));
    reader.close();
}
Example #15
Source File: EventStreamReaderTest.java (from pravega, Apache License 2.0)

@Test
public void testEventPointer() throws SegmentSealedException, NoSuchEventException, ReaderNotInReaderGroupException {
    AtomicLong clock = new AtomicLong();
    MockSegmentStreamFactory segmentStreamFactory = new MockSegmentStreamFactory();
    Orderer orderer = new Orderer();
    ReaderGroupStateManager groupState = Mockito.mock(ReaderGroupStateManager.class);
    EventStreamReaderImpl<byte[]> reader = new EventStreamReaderImpl<>(segmentStreamFactory, segmentStreamFactory,
            new ByteArraySerializer(), groupState, orderer, clock::get, ReaderConfig.builder().build(),
            createWatermarkReaders(), Mockito.mock(Controller.class));
    Segment segment = Segment.fromScopedName("Foo/Bar/0");
    Mockito.when(groupState.acquireNewSegmentsIfNeeded(eq(0L), any()))
           .thenReturn(ImmutableMap.of(new SegmentWithRange(segment, 0, 1), 0L))
           .thenReturn(Collections.emptyMap());
    Mockito.when(groupState.getEndOffsetForSegment(any(Segment.class))).thenReturn(Long.MAX_VALUE);
    SegmentOutputStream stream = segmentStreamFactory.createOutputStreamForSegment(segment, segmentSealedCallback,
            writerConfig, DelegationTokenProviderFactory.createWithEmptyToken());
    ByteBuffer buffer1 = writeInt(stream, 1);
    ByteBuffer buffer2 = writeInt(stream, 2);
    ByteBuffer buffer3 = writeInt(stream, 3);
    EventRead<byte[]> event1 = reader.readNextEvent(0);
    EventRead<byte[]> event2 = reader.readNextEvent(0);
    EventRead<byte[]> event3 = reader.readNextEvent(0);
    assertEquals(buffer1, ByteBuffer.wrap(event1.getEvent()));
    assertEquals(buffer2, ByteBuffer.wrap(event2.getEvent()));
    assertEquals(buffer3, ByteBuffer.wrap(event3.getEvent()));
    assertNull(reader.readNextEvent(0).getEvent());
    assertEquals(buffer1, ByteBuffer.wrap(reader.fetchEvent(event1.getEventPointer())));
    assertEquals(buffer3, ByteBuffer.wrap(reader.fetchEvent(event3.getEventPointer())));
    assertEquals(buffer2, ByteBuffer.wrap(reader.fetchEvent(event2.getEventPointer())));
    reader.close();
}
Example #16
Source File: ReaderCheckpointTest.java (from pravega, Apache License 2.0)

private <T extends Serializable> List<EventRead<T>> readEvents(final String scope, final String readerId) {
    List<EventRead<T>> events = new ArrayList<>();

    final ClientConfig clientConfig = Utils.buildClientConfig(controllerURI);
    try (EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope, clientConfig);
         EventStreamReader<T> reader = clientFactory.createReader(readerId, READER_GROUP_NAME,
                 new JavaSerializer<T>(), readerConfig)) {
        log.info("Reading events from {}/{} with readerId: {}", scope, STREAM, readerId);
        EventRead<T> event = null;
        do {
            try {
                event = reader.readNextEvent(READ_TIMEOUT);
                if (event.getEvent() != null) {
                    log.info("Read event {}", event.getEvent());
                    events.add(event);
                }
                if (event.isCheckpoint()) {
                    log.info("Read a check point event, checkpointName: {}", event.getCheckpointName());
                }
            } catch (ReinitializationRequiredException e) {
                log.error("Exception while reading event using readerId: {}", readerId, e);
                fail("Reinitialization Exception is not expected");
            }
        } while (event.isCheckpoint() || event.getEvent() != null); //stop reading if event read(non-checkpoint) is null.
        log.info("No more events from {}/{} for readerId: {}", scope, STREAM, readerId);
    } //reader.close() will automatically invoke ReaderGroup#readerOffline(String, Position)
    return events;
}
Example #17
Source File: EventStreamReaderTest.java (from pravega, Apache License 2.0)

@Test(timeout = 10000)
public void testSilentCheckpointFollowingCheckpoint() throws SegmentSealedException, ReaderNotInReaderGroupException {
    AtomicLong clock = new AtomicLong();
    MockSegmentStreamFactory segmentStreamFactory = new MockSegmentStreamFactory();
    Orderer orderer = new Orderer();
    ReaderGroupStateManager groupState = Mockito.mock(ReaderGroupStateManager.class);
    EventStreamReaderImpl<byte[]> reader = new EventStreamReaderImpl<>(segmentStreamFactory, segmentStreamFactory,
            new ByteArraySerializer(), groupState, orderer, clock::get, ReaderConfig.builder().build(),
            createWatermarkReaders(), Mockito.mock(Controller.class));
    Segment segment = Segment.fromScopedName("Foo/Bar/0");
    Mockito.when(groupState.acquireNewSegmentsIfNeeded(eq(0L), any()))
           .thenReturn(ImmutableMap.of(new SegmentWithRange(segment, 0, 1), 0L))
           .thenReturn(Collections.emptyMap());
    Mockito.when(groupState.getEndOffsetForSegment(any(Segment.class))).thenReturn(Long.MAX_VALUE);
    SegmentOutputStream stream = segmentStreamFactory.createOutputStreamForSegment(segment, segmentSealedCallback,
            writerConfig, DelegationTokenProviderFactory.createWithEmptyToken());
    ByteBuffer buffer = writeInt(stream, 1);
    Mockito.doReturn(true).when(groupState).isCheckpointSilent(Mockito.eq(ReaderGroupImpl.SILENT + "Foo"));
    Mockito.when(groupState.getCheckpoint())
           .thenReturn("Bar")
           .thenReturn(ReaderGroupImpl.SILENT + "Foo")
           .thenReturn(null);
    EventRead<byte[]> eventRead = reader.readNextEvent(10000);
    assertTrue(eventRead.isCheckpoint());
    assertNull(eventRead.getEvent());
    assertEquals("Bar", eventRead.getCheckpointName());
    assertEquals(buffer, ByteBuffer.wrap(reader.readNextEvent(0).getEvent()));
    InOrder order = Mockito.inOrder(groupState);
    order.verify(groupState).getCheckpoint();
    order.verify(groupState).checkpoint(Mockito.eq("Bar"), Mockito.any());
    order.verify(groupState).getCheckpoint();
    order.verify(groupState).checkpoint(Mockito.eq(ReaderGroupImpl.SILENT + "Foo"), Mockito.any());
    order.verify(groupState).getCheckpoint();
    reader.close();
}
Example #18
Source File: EventStreamReaderTest.java (from pravega, Apache License 2.0)

@Test(timeout = 10000)
public void testCheckpointFollowingSilentCheckpoint() throws SegmentSealedException, ReaderNotInReaderGroupException {
    AtomicLong clock = new AtomicLong();
    MockSegmentStreamFactory segmentStreamFactory = new MockSegmentStreamFactory();
    Orderer orderer = new Orderer();
    ReaderGroupStateManager groupState = Mockito.mock(ReaderGroupStateManager.class);
    EventStreamReaderImpl<byte[]> reader = new EventStreamReaderImpl<>(segmentStreamFactory, segmentStreamFactory,
            new ByteArraySerializer(), groupState, orderer, clock::get, ReaderConfig.builder().build(),
            createWatermarkReaders(), Mockito.mock(Controller.class));
    Segment segment = Segment.fromScopedName("Foo/Bar/0");
    Mockito.when(groupState.acquireNewSegmentsIfNeeded(eq(0L), any()))
           .thenReturn(ImmutableMap.of(new SegmentWithRange(segment, 0, 1), 0L))
           .thenReturn(Collections.emptyMap());
    Mockito.when(groupState.getEndOffsetForSegment(any(Segment.class))).thenReturn(Long.MAX_VALUE);
    SegmentOutputStream stream = segmentStreamFactory.createOutputStreamForSegment(segment, segmentSealedCallback,
            writerConfig, DelegationTokenProviderFactory.createWithEmptyToken());
    ByteBuffer buffer = writeInt(stream, 1);
    Mockito.doReturn(true).when(groupState).isCheckpointSilent(Mockito.eq(ReaderGroupImpl.SILENT + "Foo"));
    Mockito.when(groupState.getCheckpoint())
           .thenReturn(ReaderGroupImpl.SILENT + "Foo")
           .thenReturn("Bar")
           .thenReturn(null);
    EventRead<byte[]> eventRead = reader.readNextEvent(10000);
    assertTrue(eventRead.isCheckpoint());
    assertNull(eventRead.getEvent());
    assertEquals("Bar", eventRead.getCheckpointName());
    InOrder order = Mockito.inOrder(groupState);
    order.verify(groupState).getCheckpoint();
    order.verify(groupState).checkpoint(Mockito.eq(ReaderGroupImpl.SILENT + "Foo"), Mockito.any());
    assertEquals(buffer, ByteBuffer.wrap(reader.readNextEvent(0).getEvent()));
    order.verify(groupState).getCheckpoint();
    order.verify(groupState).checkpoint(Mockito.eq("Bar"), Mockito.any());
    order.verify(groupState).getCheckpoint();
    reader.close();
}
Example #19
Source File: EventStreamReaderTest.java (from pravega, Apache License 2.0)

@Test(timeout = 10000)
public void testDataTruncated() throws SegmentSealedException, ReaderNotInReaderGroupException {
    AtomicLong clock = new AtomicLong();
    MockSegmentStreamFactory segmentStreamFactory = new MockSegmentStreamFactory();
    Orderer orderer = new Orderer();
    ReaderGroupStateManager groupState = Mockito.mock(ReaderGroupStateManager.class);
    EventStreamReaderImpl<byte[]> reader = new EventStreamReaderImpl<>(segmentStreamFactory, segmentStreamFactory,
            new ByteArraySerializer(), groupState, orderer, clock::get, ReaderConfig.builder().build(),
            createWatermarkReaders(), Mockito.mock(Controller.class));
    Segment segment = Segment.fromScopedName("Foo/Bar/0");
    Mockito.when(groupState.acquireNewSegmentsIfNeeded(eq(0L), any()))
           .thenReturn(ImmutableMap.of(new SegmentWithRange(segment, 0, 1), 0L))
           .thenReturn(Collections.emptyMap());
    Mockito.when(groupState.getEndOffsetForSegment(any(Segment.class))).thenReturn(Long.MAX_VALUE);
    SegmentOutputStream stream = segmentStreamFactory.createOutputStreamForSegment(segment, segmentSealedCallback,
            writerConfig, DelegationTokenProviderFactory.createWithEmptyToken());
    SegmentMetadataClient metadataClient = segmentStreamFactory.createSegmentMetadataClient(segment,
            DelegationTokenProviderFactory.createWithEmptyToken());
    ByteBuffer buffer1 = writeInt(stream, 1);
    ByteBuffer buffer2 = writeInt(stream, 2);
    writeInt(stream, 3);
    long length = metadataClient.fetchCurrentSegmentLength();
    assertEquals(0, length % 3);
    EventRead<byte[]> event1 = reader.readNextEvent(0);
    assertEquals(buffer1, ByteBuffer.wrap(event1.getEvent()));
    metadataClient.truncateSegment(length / 3);
    assertEquals(buffer2, ByteBuffer.wrap(reader.readNextEvent(0).getEvent()));
    metadataClient.truncateSegment(length);
    ByteBuffer buffer4 = writeInt(stream, 4);
    assertThrows(TruncatedDataException.class, () -> reader.readNextEvent(0));
    assertEquals(buffer4, ByteBuffer.wrap(reader.readNextEvent(0).getEvent()));
    assertNull(reader.readNextEvent(0).getEvent());
    assertThrows(NoSuchEventException.class, () -> reader.fetchEvent(event1.getEventPointer()));
    reader.close();
}
Example #20
Source File: ReadTest.java (from pravega, Apache License 2.0)

/**
 * Reads events and puts them in a queue for later checking (potentially by another thread).
 */
private void readAndQueueEvents(EventStreamReader<String> reader, int eventsToWrite,
                                Queue<Entry<Integer, PositionImpl>> readEventsPositions) {
    int eventCount = 1;
    for (int i = 0; i < eventsToWrite; i++) {
        final EventRead<String> event = reader.readNextEvent(1000);
        if (event.getEvent() != null && !event.isCheckpoint()) {
            // The reader should own only 1 segment.
            readEventsPositions.add(new AbstractMap.SimpleEntry<>(eventCount, (PositionImpl) event.getPosition()));
            eventCount++;
        }
    }
}
Example #21
Source File: AbstractReadWriteTest.java (from pravega, Apache License 2.0)

private <T> int readEvents(EventStreamReader<T> reader, int limit, boolean reinitializationExpected) {
    EventRead<T> event = null;
    int validEvents = 0;
    boolean reinitializationRequired = false;
    try {
        do {
            try {
                event = reader.readNextEvent(READ_TIMEOUT);
                log.debug("Read event result in readEvents: {}.", event.getEvent());
                if (event.getEvent() != null) {
                    validEvents++;
                }
                reinitializationRequired = false;
            } catch (ReinitializationRequiredException e) {
                log.error("Exception while reading event using readerId: {}", reader, e);
                if (reinitializationExpected) {
                    reinitializationRequired = true;
                } else {
                    fail("Reinitialization Exception is not expected");
                }
            }
        } while (reinitializationRequired || ((event.getEvent() != null || event.isCheckpoint()) && validEvents < limit));
    } finally {
        closeReader(reader);
    }
    return validEvents;
}
Example #22
Source File: StreamCutsTest.java (from pravega, Apache License 2.0)

private <T extends Serializable> int createReaderAndReadEvents(ReaderGroupManager rgMgr,
                                                               EventStreamClientFactory clientFactory,
                                                               String rGroupId, int readerIndex) {
    // create a reader.
    EventStreamReader<T> reader = clientFactory.createReader(rGroupId + "-" + readerIndex, rGroupId,
            new JavaSerializer<>(), ReaderConfig.builder().build());
    EventRead<T> event = null;
    int validEvents = 0;
    AtomicBoolean sealedSegmentUpdated = new AtomicBoolean(false);
    try {
        do {
            try {
                event = reader.readNextEvent(READ_TIMEOUT);
                log.debug("Read event result in readEvents: {}.", event.getEvent());
                if (event.getEvent() == null && !event.isCheckpoint() && !sealedSegmentUpdated.get()) {
                    // initiate a checkpoint to ensure all sealed segments are acquired by the reader.
                    ReaderGroup readerGroup = rgMgr.getReaderGroup(rGroupId);
                    readerGroup.initiateCheckpoint("chkPoint", chkPointExecutor)
                               .whenComplete((checkpoint, t) -> {
                                   if (t != null) {
                                       log.error("Checkpoint operation failed", t);
                                   } else {
                                       log.info("Checkpoint {} completed", checkpoint);
                                       sealedSegmentUpdated.set(true);
                                   }
                               });
                }
                if (event.getEvent() != null) {
                    validEvents++;
                }
            } catch (ReinitializationRequiredException e) {
                log.error("Reinitialization Exception while reading event using readerId: {}", reader, e);
                fail("Reinitialization Exception is not expected");
            }
        } while (event.getEvent() != null || event.isCheckpoint() || !sealedSegmentUpdated.get());
    } finally {
        closeReader(reader);
    }
    return validEvents;
}
Example #23
Source File: ReaderCheckpointTest.java (from pravega, Apache License 2.0)

private void readEventsAndVerify(final String scope, int startInclusive, int endExclusive) {
    log.info("Read and Verify events between [{},{})", startInclusive, endExclusive);
    final List<CompletableFuture<List<EventRead<Integer>>>> readResults = new ArrayList<>();

    //start reading using configured number of readers
    for (int i = 0; i < NUMBER_OF_READERS; i++) {
        readResults.add(asyncReadEvents(scope, "reader-" + i));
    }

    //results from all readers
    List<List<EventRead<Integer>>> results = Futures.allOfWithResults(readResults).join();
    List<EventRead<Integer>> eventsRead = results.stream().flatMap(List::stream).collect(Collectors.toList());

    verifyEvents(eventsRead, startInclusive, endExclusive);
}
Example #24
Source File: ReaderCheckpointTest.java (from pravega, Apache License 2.0)

private CompletableFuture<List<EventRead<Integer>>> asyncReadEvents(final String scope, final String readerId) {
    CompletableFuture<List<EventRead<Integer>>> result = CompletableFuture.supplyAsync(
            () -> readEvents(scope, readerId), readerExecutor);
    Futures.exceptionListener(result,
            t -> log.error("Error observed while reading events for reader id :{}", readerId, t));
    return result;
}
Example #25
Source File: ReaderCheckpointTest.java (from pravega, Apache License 2.0)

private void verifyEvents(final List<EventRead<Integer>> events, int startInclusive, int endExclusive) {
    Supplier<java.util.stream.Stream<Integer>> streamSupplier = () -> events.stream().map(EventRead::getEvent).sorted();
    IntSummaryStatistics stats = streamSupplier.get().collect(Collectors.summarizingInt(value -> value));

    assertTrue(String.format("Check for first event: %d, %d", stats.getMin(), startInclusive),
            stats.getMin() == startInclusive);
    assertTrue(String.format("Check for last event: %d, %d", stats.getMax(), endExclusive),
            stats.getMax() == endExclusive - 1);
    //Check for missing events
    assertEquals(String.format("Check for number of events: %d, %d, %d", endExclusive, startInclusive, stats.getCount()),
            endExclusive - startInclusive, stats.getCount());
    assertEquals(String.format("Check for duplicate events: %d, %d, %d", endExclusive, startInclusive,
            streamSupplier.get().distinct().count()),
            endExclusive - startInclusive, streamSupplier.get().distinct().count());
}
Example #26
Source File: EndToEndReaderGroupTest.java (from pravega, Apache License 2.0)

@Test(timeout = 30000)
public void testDeleteReaderGroup() throws Exception {
    StreamConfiguration config = getStreamConfig();
    LocalController controller = (LocalController) controllerWrapper.getController();
    controllerWrapper.getControllerService().createScope("test").get();
    controller.createStream("test", "test", config).get();
    @Cleanup
    ConnectionFactory connectionFactory = new ConnectionFactoryImpl(ClientConfig.builder()
            .controllerURI(URI.create("tcp://" + serviceHost))
            .build());
    @Cleanup
    ClientFactoryImpl clientFactory = new ClientFactoryImpl("test", controller, connectionFactory);
    @Cleanup
    ReaderGroupManager groupManager = new ReaderGroupManagerImpl("test", controller, clientFactory, connectionFactory);

    // Create a ReaderGroup
    groupManager.createReaderGroup("group", ReaderGroupConfig.builder().disableAutomaticCheckpoints()
                                                             .stream("test/test").build());

    // Create a Reader
    EventStreamReader<String> reader = clientFactory.createReader("reader1", "group", serializer,
            ReaderConfig.builder().build());

    // Write events into the stream.
    @Cleanup
    EventStreamWriter<String> writer = clientFactory.createEventWriter("test", serializer,
            EventWriterConfig.builder().build());
    writer.writeEvent("0", "data1").get();

    EventRead<String> eventRead = reader.readNextEvent(10000);
    assertEquals("data1", eventRead.getEvent());

    // Close the reader, this internally invokes ReaderGroup#readerOffline
    reader.close();

    //delete the readerGroup.
    groupManager.deleteReaderGroup("group");

    // create a new readerGroup with the same name.
    groupManager.createReaderGroup("group", ReaderGroupConfig.builder().disableAutomaticCheckpoints()
                                                             .stream("test/test").build());

    reader = clientFactory.createReader("reader1", "group", new JavaSerializer<>(), ReaderConfig.builder().build());
    eventRead = reader.readNextEvent(10000);
    assertEquals("data1", eventRead.getEvent());
}
Example #27
Source File: EndToEndReaderGroupTest.java (from pravega, Apache License 2.0)

@Test(timeout = 30000)
public void testGenerateStreamCuts() throws Exception {
    final Stream stream = Stream.of(SCOPE, STREAM);
    final String group = "group";

    createScope(SCOPE);
    createStream(SCOPE, STREAM, ScalingPolicy.fixed(1));

    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE,
            ClientConfig.builder().controllerURI(controllerURI).build());
    @Cleanup
    EventStreamWriter<String> writer = clientFactory.createEventWriter(STREAM, serializer,
            EventWriterConfig.builder().build());
    //Prep the stream with data.
    //1. Write events with event size of 30
    writer.writeEvent(randomKeyGenerator.get(), getEventData.apply(1)).join();
    writer.writeEvent(randomKeyGenerator.get(), getEventData.apply(2)).join();
    writer.writeEvent(randomKeyGenerator.get(), getEventData.apply(3)).join();
    writer.writeEvent(randomKeyGenerator.get(), getEventData.apply(4)).join();

    @Cleanup
    ReaderGroupManager groupManager = ReaderGroupManager.withScope(SCOPE, controllerURI);
    groupManager.createReaderGroup(group, ReaderGroupConfig
            .builder().disableAutomaticCheckpoints().groupRefreshTimeMillis(1000)
            .stream(stream)
            .build());

    ReaderGroup readerGroup = groupManager.getReaderGroup(group);

    //Create a reader
    @Cleanup
    EventStreamReader<String> reader = clientFactory.createReader("readerId", group, serializer,
            ReaderConfig.builder().build());

    readAndVerify(reader, 1);
    @Cleanup("shutdown")
    InlineExecutor backgroundExecutor = new InlineExecutor();
    CompletableFuture<Map<Stream, StreamCut>> sc = readerGroup.generateStreamCuts(backgroundExecutor);
    // The reader group state will be updated after 1 second.
    TimeUnit.SECONDS.sleep(1);

    EventRead<String> data = reader.readNextEvent(15000);
    assertTrue(Futures.await(sc)); // wait until the streamCut is obtained.

    //expected segment 0 offset is 30L.
    Map<Segment, Long> expectedOffsetMap = ImmutableMap.of(getSegment(0, 0), 30L);
    Map<Stream, StreamCut> scMap = sc.join();

    assertEquals("StreamCut for a single stream expected", 1, scMap.size());
    assertEquals("StreamCut pointing to offset 30L expected", new StreamCutImpl(stream, expectedOffsetMap),
            scMap.get(stream));
}
Example #28
Source File: EndToEndReaderGroupTest.java (from pravega, Apache License 2.0)

@Test(timeout = 30000)
public void testReaderOffline() throws Exception {
    StreamConfiguration config = getStreamConfig();
    LocalController controller = (LocalController) controllerWrapper.getController();
    controllerWrapper.getControllerService().createScope("test").get();
    controller.createStream("test", "test", config).get();
    @Cleanup
    ConnectionFactory connectionFactory = new ConnectionFactoryImpl(ClientConfig.builder()
            .controllerURI(URI.create("tcp://" + serviceHost))
            .build());
    @Cleanup
    ClientFactoryImpl clientFactory = new ClientFactoryImpl("test", controller, connectionFactory);
    @Cleanup
    ReaderGroupManager groupManager = new ReaderGroupManagerImpl("test", controller, clientFactory, connectionFactory);
    groupManager.createReaderGroup("group", ReaderGroupConfig.builder().disableAutomaticCheckpoints()
                                                             .stream("test/test").build());

    final ReaderGroup readerGroup = groupManager.getReaderGroup("group");

    // create a reader
    @Cleanup
    EventStreamReader<String> reader1 = clientFactory.createReader("reader1", "group", new JavaSerializer<>(),
            ReaderConfig.builder().build());
    EventRead<String> eventRead = reader1.readNextEvent(100);
    assertNull("Event read should be null since no events are written", eventRead.getEvent());

    @Cleanup
    EventStreamReader<String> reader2 = clientFactory.createReader("reader2", "group", new JavaSerializer<>(),
            ReaderConfig.builder().build());

    //make reader1 offline
    readerGroup.readerOffline("reader1", null);

    // write events into the stream.
    @Cleanup
    EventStreamWriter<String> writer = clientFactory.createEventWriter("test", new JavaSerializer<>(),
            EventWriterConfig.builder().build());
    writer.writeEvent("0", "data1").get();
    writer.writeEvent("0", "data2").get();

    eventRead = reader2.readNextEvent(10000);
    assertEquals("data1", eventRead.getEvent());
}
Example #29
Source File: EndToEndTruncationTest.java (from pravega, Apache License 2.0)

@Test(timeout = 30000)
public void testWriteDuringTruncationAndDeletion() throws Exception {
    StreamConfiguration config = StreamConfiguration.builder()
                                                    .scalingPolicy(ScalingPolicy.byEventRate(10, 2, 2))
                                                    .build();
    LocalController controller = (LocalController) controllerWrapper.getController();
    controllerWrapper.getControllerService().createScope("test").get();
    controller.createStream("test", "test", config).get();

    config = StreamConfiguration.builder()
                                .scalingPolicy(ScalingPolicy.byEventRate(10, 2, 1))
                                .build();
    controller.updateStream("test", "test", config).get();

    @Cleanup
    ConnectionFactory connectionFactory = new ConnectionFactoryImpl(ClientConfig.builder()
            .controllerURI(URI.create("tcp://" + serviceHost))
            .build());
    @Cleanup
    ClientFactoryImpl clientFactory = new ClientFactoryImpl("test", controller, connectionFactory);
    @Cleanup
    EventStreamWriter<String> writer = clientFactory.createEventWriter("test", new JavaSerializer<>(),
            EventWriterConfig.builder().build());

    // routing key "0" translates to key 0.8. This write happens to segment 1.
    writer.writeEvent("0", "truncationTest1").get();

    // scale down to one segment.
    Stream stream = new StreamImpl("test", "test");
    Map<Double, Double> map = new HashMap<>();
    map.put(0.0, 1.0);
    assertTrue("Stream Scale down", controller.scaleStream(stream, Lists.newArrayList(0L, 1L), map, executor)
                                              .getFuture().get());

    // truncate stream at segment 2, offset 0.
    Map<Long, Long> streamCutPositions = new HashMap<>();
    streamCutPositions.put(computeSegmentId(2, 1), 0L);
    assertTrue("Truncate stream", controller.truncateStream("test", "test", streamCutPositions).get());

    // routing key "2" translates to key 0.2.
    // this write translates to a write to Segment 0, but since segment 0 is truncated the write should happen on segment 2.
    // write to segment 0
    writer.writeEvent("2", "truncationTest2").get();

    @Cleanup
    ReaderGroupManager groupManager = new ReaderGroupManagerImpl("test", controller, clientFactory, connectionFactory);
    groupManager.createReaderGroup("reader", ReaderGroupConfig.builder().disableAutomaticCheckpoints()
                                                              .stream("test/test").build());
    @Cleanup
    EventStreamReader<String> reader = clientFactory.createReader("readerId", "reader", new JavaSerializer<>(),
            ReaderConfig.builder().build());

    EventRead<String> event = reader.readNextEvent(10000);
    assertNotNull(event);
    assertEquals("truncationTest2", event.getEvent());

    //Seal and Delete stream.
    assertTrue(controller.sealStream("test", "test").get());
    assertTrue(controller.deleteStream("test", "test").get());

    //write by an existing writer to a deleted stream should complete exceptionally.
    assertFutureThrows("Should throw NoSuchSegmentException",
            writer.writeEvent("2", "write to deleted stream"),
            e -> NoSuchSegmentException.class.isAssignableFrom(e.getClass()));

    //subsequent writes will throw an exception to the application.
    assertThrows(RuntimeException.class, () -> writer.writeEvent("test"));
}
Example #30
Source File: EndToEndReaderGroupTest.java (from pravega, Apache License 2.0)

@Test(timeout = 30000)
public void testMultiScopeReaderGroup() throws Exception {
    LocalController controller = (LocalController) controllerWrapper.getController();

    // Config of two streams with same name and different scopes.
    String defaultScope = "test";
    String scopeA = "scopeA";
    String scopeB = "scopeB";
    String streamName = "test";

    // Create Scopes
    controllerWrapper.getControllerService().createScope(defaultScope).get();
    controllerWrapper.getControllerService().createScope(scopeA).get();
    controllerWrapper.getControllerService().createScope(scopeB).get();

    // Create Streams.
    controller.createStream(scopeA, streamName, getStreamConfig()).get();
    controller.createStream(scopeB, streamName, getStreamConfig()).get();

    // Create ReaderGroup and reader.
    @Cleanup
    ConnectionFactory connectionFactory = new ConnectionFactoryImpl(ClientConfig.builder()
            .controllerURI(URI.create("tcp://" + serviceHost))
            .build());
    @Cleanup
    ClientFactoryImpl clientFactory = new ClientFactoryImpl(streamName, controller, connectionFactory);
    @Cleanup
    ReaderGroupManager groupManager = new ReaderGroupManagerImpl(defaultScope, controller, clientFactory,
            connectionFactory);
    groupManager.createReaderGroup("group", ReaderGroupConfig.builder()
                                                             .disableAutomaticCheckpoints()
                                                             .stream(Stream.of(scopeA, streamName))
                                                             .stream(Stream.of(scopeB, streamName))
                                                             .build());
    ReaderGroup readerGroup = groupManager.getReaderGroup("group");
    @Cleanup
    EventStreamReader<String> reader1 = clientFactory.createReader("reader1", "group", new JavaSerializer<>(),
            ReaderConfig.builder().build());

    // Read empty stream.
    EventRead<String> eventRead = reader1.readNextEvent(100);
    assertNull("Event read should be null since no events are written", eventRead.getEvent());

    // Write to scopeA stream.
    writeTestEvent(scopeA, streamName, 0);
    eventRead = reader1.readNextEvent(10000);
    assertEquals("0", eventRead.getEvent());

    // Write to scopeB stream.
    writeTestEvent(scopeB, streamName, 1);
    eventRead = reader1.readNextEvent(10000);
    assertEquals("1", eventRead.getEvent());

    // Verify ReaderGroup.getStreamNames().
    Set<String> managedStreams = readerGroup.getStreamNames();
    assertTrue(managedStreams.contains(Stream.of(scopeA, streamName).getScopedName()));
    assertTrue(managedStreams.contains(Stream.of(scopeB, streamName).getScopedName()));
}