io.pravega.client.EventStreamClientFactory Java Examples
The following examples show how to use io.pravega.client.EventStreamClientFactory. Each example lists the source file and project it was taken from.
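All of the examples below revolve around the same entry point: EventStreamClientFactory.withScope(scope, clientConfig), which hands out event writers and readers for streams in that scope. As a minimal end-to-end sketch of that pattern (the controller URI, scope, stream, and reader group names here are placeholders, and the scope, stream, and reader group are assumed to already be set up):

// Minimal sketch; URI, scope, stream, and group names are placeholders.
ClientConfig clientConfig = ClientConfig.builder()
        .controllerURI(URI.create("tcp://localhost:9090"))
        .build();

try (EventStreamClientFactory factory = EventStreamClientFactory.withScope("myScope", clientConfig)) {
    // Write one event and wait for the acknowledgment.
    EventStreamWriter<String> writer = factory.createEventWriter("myStream",
            new JavaSerializer<>(), EventWriterConfig.builder().build());
    writer.writeEvent("routing-key", "hello").join();
    writer.close();

    // Read it back through a reader group.
    try (ReaderGroupManager groupManager = ReaderGroupManager.withScope("myScope", clientConfig)) {
        groupManager.createReaderGroup("myGroup", ReaderGroupConfig.builder()
                .stream(Stream.of("myScope", "myStream")).build());
    }
    EventStreamReader<String> reader = factory.createReader("reader-1", "myGroup",
            new JavaSerializer<>(), ReaderConfig.builder().build());
    System.out.println(reader.readNextEvent(2000).getEvent());
    reader.close();
}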
Example #1
Source File: StreamCutsTest.java From pravega with Apache License 2.0
/**
 * Check that all the stream slices represented by consecutive StreamCut pairs can be read correctly.
 *
 * @param manager          Group manager for this scope.
 * @param clientFactory    Client factory to instantiate new readers.
 * @param parallelSegments Number of parallel segments that indicates the number of parallel readers to instantiate.
 * @param streamSlices     StreamCuts lists to be combined and tested via bounded processing.
 */
private void readSliceBySliceAndVerify(ReaderGroupManager manager, EventStreamClientFactory clientFactory,
                                       int parallelSegments, List<Map<Stream, StreamCut>> streamSlices) {
    int readEvents;
    for (int i = 1; i < streamSlices.size(); i++) {
        log.debug("Reading events between starting StreamCut {} and ending StreamCut {}",
                streamSlices.get(i - 1), streamSlices.get(i));
        ReaderGroupConfig config = ReaderGroupConfig.builder()
                .stream(Stream.of(SCOPE, STREAM_ONE))
                .stream(Stream.of(SCOPE, STREAM_TWO))
                .startingStreamCuts(streamSlices.get(i - 1))
                .endingStreamCuts(streamSlices.get(i)).build();

        // Create a new reader group per stream cut slice and read in parallel only events within the cut.
        final String readerGroupId = READER_GROUP + String.valueOf(i) + "-" + System.nanoTime();
        manager.createReaderGroup(readerGroupId, config);
        readEvents = readAllEvents(manager, clientFactory, readerGroupId, parallelSegments).stream()
                .map(CompletableFuture::join)
                .reduce(Integer::sum).get();

        log.debug("Read events by group {}: {}.", readerGroupId, readEvents);
        assertEquals("Expected events read: ", CUT_SIZE, readEvents);
    }
}
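The readAllEvents helper invoked here is defined elsewhere in StreamCutsTest; from the call site it evidently spawns one reader per parallel segment and returns one CompletableFuture<Integer> per reader with its event count. A hypothetical sketch of such a helper, with names and timeout handling assumed rather than taken from the project:

// Hypothetical sketch of a readAllEvents-style helper; not the actual StreamCutsTest code.
private List<CompletableFuture<Integer>> readAllEvents(ReaderGroupManager manager,
                                                       EventStreamClientFactory clientFactory,
                                                       String readerGroupId, int numReaders) {
    List<CompletableFuture<Integer>> futures = new ArrayList<>();
    for (int i = 0; i < numReaders; i++) {
        final String readerId = readerGroupId + "-reader-" + i;
        futures.add(CompletableFuture.supplyAsync(() -> {
            int count = 0;
            try (EventStreamReader<String> reader = clientFactory.createReader(readerId, readerGroupId,
                    new JavaSerializer<>(), ReaderConfig.builder().build())) {
                // A reader reaches the end of a bounded slice once reads return
                // neither an event nor a checkpoint.
                EventRead<String> event = reader.readNextEvent(1000);
                while (event.getEvent() != null || event.isCheckpoint()) {
                    if (event.getEvent() != null) {
                        count++;
                    }
                    event = reader.readNextEvent(1000);
                }
            }
            return count;
        }));
    }
    return futures;
}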
Example #2
Source File: ReaderCheckpointTest.java From pravega with Apache License 2.0
private Map<Stream, StreamCut> generateStreamCuts(final ReaderGroup readerGroup) {
    log.info("Generate StreamCuts");
    String readerId = "streamCut";
    CompletableFuture<Map<io.pravega.client.stream.Stream, StreamCut>> streamCuts = null;
    final ClientConfig clientConfig = Utils.buildClientConfig(controllerURI);

    try (EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE_2, clientConfig);
         EventStreamReader<Integer> reader = clientFactory.createReader(readerId,
                 READER_GROUP_NAME,
                 new JavaSerializer<Integer>(),
                 readerConfig)) {

        streamCuts = readerGroup.generateStreamCuts(executor); // generate StreamCuts asynchronously
        Exceptions.handleInterrupted(() -> TimeUnit.MILLISECONDS.sleep(GROUP_REFRESH_TIME_MILLIS)); // sleep for group refresh.
        // Read the next event; this causes the reader to update its latest offset.
        EventRead<Integer> event = reader.readNextEvent(READ_TIMEOUT);
        assertTrue("No events expected as all events are read",
                (event.getEvent() == null) && (!event.isCheckpoint()));
        Futures.exceptionListener(streamCuts, t -> log.error("StreamCut generation failed", t));
        assertTrue("Stream cut generation should be completed", Futures.await(streamCuts));
    } catch (ReinitializationRequiredException e) {
        log.error("Exception while reading event using readerId: {}", readerId, e);
        fail("Reinitialization Exception is not expected");
    }
    return streamCuts.join();
}
Example #3
Source File: FlinkPravegaUtils.java From flink-connectors with Apache License 2.0
/**
 * Creates a Pravega {@link EventStreamReader}.
 *
 * @param clientConfig          The Pravega client configuration.
 * @param readerId              The id of the Pravega reader.
 * @param readerGroupScopeName  The reader group scope name.
 * @param readerGroupName       The reader group name.
 * @param deserializationSchema The implementation to deserialize events from Pravega streams.
 * @param readerConfig          The reader configuration.
 * @param <T>                   The type of the event.
 * @return the created Pravega reader.
 */
public static <T> EventStreamReader<T> createPravegaReader(
        ClientConfig clientConfig,
        String readerId,
        String readerGroupScopeName,
        String readerGroupName,
        DeserializationSchema<T> deserializationSchema,
        ReaderConfig readerConfig) {

    // Create the adapter between Pravega's serializers and Flink's serializers.
    @SuppressWarnings("unchecked")
    final Serializer<T> deserializer = deserializationSchema instanceof WrappingSerializer
            ? ((WrappingSerializer<T>) deserializationSchema).getWrappedSerializer()
            : new FlinkDeserializer<>(deserializationSchema);

    return EventStreamClientFactory.withScope(readerGroupScopeName, clientConfig)
            .createReader(readerId, readerGroupName, deserializer, readerConfig);
}
Example #4
Source File: ReaderCheckpointTest.java From pravega with Apache License 2.0
private Checkpoint createCheckPointAndVerify(final ReaderGroup readerGroup, final String checkPointName) {
    log.info("Create and verify check point {}", checkPointName);
    String readerId = "checkPointReader";
    CompletableFuture<Checkpoint> checkpoint = null;
    final ClientConfig clientConfig = Utils.buildClientConfig(controllerURI);

    try (EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE_1, clientConfig);
         EventStreamReader<Integer> reader = clientFactory.createReader(readerId,
                 READER_GROUP_NAME,
                 new JavaSerializer<Integer>(),
                 readerConfig)) {

        checkpoint = readerGroup.initiateCheckpoint(checkPointName, executor); // create checkpoint

        // Verify the checkpoint event.
        EventRead<Integer> event = reader.readNextEvent(READ_TIMEOUT);
        assertTrue("Read for Checkpoint event", (event != null) && (event.isCheckpoint()));
        assertEquals("CheckPoint Name", checkPointName, event.getCheckpointName());
    }
    return checkpoint.join();
}
Example #5
Source File: AbstractReadWriteTest.java From pravega with Apache License 2.0
private void createWritersInternal(EventStreamClientFactory clientFactory, final int writers, String scope,
                                   String stream, CompletableFuture<Void> writersComplete,
                                   boolean isTransactionalWriter) {
    testState.writersListComplete.add(writersComplete);
    log.info("Client factory details {}", clientFactory.toString());
    log.info("Creating {} writers", writers);
    List<CompletableFuture<Void>> writerFutureList = new ArrayList<>();
    log.info("Writers writing in the scope {}", scope);
    CompletableFuture.runAsync(() -> {
        for (int i = 0; i < writers; i++) {
            log.info("Starting writer: {} (is transactional? {})", i, isTransactionalWriter);
            final CompletableFuture<Void> writerFuture = isTransactionalWriter
                    ? startWritingIntoTxn(instantiateTransactionalWriter("writer-" + i, clientFactory, stream))
                    : startWriting(instantiateWriter(clientFactory, stream));
            Futures.exceptionListener(writerFuture, t -> log.error("Error while writing events:", t));
            writerFutureList.add(writerFuture);
        }
    }, executorService).thenRun(() -> {
        testState.writers.addAll(writerFutureList);
        Futures.completeAfter(() -> Futures.allOf(writerFutureList), writersComplete);
        Futures.exceptionListener(writersComplete, t -> log.error("Exception while waiting for writers to complete", t));
    });
}
Example #6
Source File: ReadWriteTest.java From pravega with Apache License 2.0
private CompletableFuture<Void> startNewWriter(final AtomicLong data, final EventStreamClientFactory clientFactory) {
    return CompletableFuture.runAsync(() -> {
        final EventStreamWriter<Long> writer = clientFactory.createEventWriter(STREAM_NAME,
                new JavaSerializer<Long>(),
                EventWriterConfig.builder().build());
        for (int i = 0; i < NUM_EVENTS_BY_WRITER; i++) {
            long value = data.incrementAndGet();
            log.info("Writing event {}", value);
            writer.writeEvent(String.valueOf(value), value);
            writer.flush();
        }
        log.info("Closing writer {}", writer);
        writer.close();
    }, writerPool);
}
Example #7
Source File: ReadWriteTest.java From pravega with Apache License 2.0
private CompletableFuture<Void> startNewReader(final String id, final EventStreamClientFactory clientFactory,
                                               final String readerGroupName,
                                               final ConcurrentLinkedQueue<Long> readResult,
                                               final AtomicLong writeCount, final AtomicLong readCount,
                                               final AtomicBoolean exitFlag) {
    return CompletableFuture.runAsync(() -> {
        final EventStreamReader<Long> reader = clientFactory.createReader(id,
                readerGroupName,
                new JavaSerializer<Long>(),
                ReaderConfig.builder().build());
        while (!(exitFlag.get() && readCount.get() == writeCount.get())) {
            final Long longEvent = reader.readNextEvent(SECONDS.toMillis(2)).getEvent();
            log.info("Reading event {}", longEvent);
            if (longEvent != null) {
                // Update only if the event read is not null.
                readResult.add(longEvent);
                readCount.incrementAndGet();
            }
        }
        log.info("Closing reader {}", reader);
        reader.close();
    }, readerPool);
}
Example #8
Source File: StreamTransactionMetadataTasks.java From pravega with Apache License 2.0
/**
 * Initializes stream writers for the commit and abort streams.
 * This method should be called immediately after creating the StreamTransactionMetadataTasks object.
 *
 * @param clientFactory Client factory reference.
 * @param config        Controller event processor configuration.
 */
@Synchronized
public void initializeStreamWriters(final EventStreamClientFactory clientFactory,
                                    final ControllerEventProcessorConfig config) {
    if (!commitWriterFuture.isDone()) {
        commitWriterFuture.complete(clientFactory.createEventWriter(
                config.getCommitStreamName(),
                ControllerEventProcessors.COMMIT_EVENT_SERIALIZER,
                EventWriterConfig.builder().build()));
    }
    if (!abortWriterFuture.isDone()) {
        abortWriterFuture.complete(clientFactory.createEventWriter(
                config.getAbortStreamName(),
                ControllerEventProcessors.ABORT_EVENT_SERIALIZER,
                EventWriterConfig.builder().build()));
    }
    this.setReady();
}
Example #9
Source File: StreamCutsTest.java From pravega with Apache License 2.0
private List<Map<Stream, StreamCut>> writeEventsAndCheckSlices(EventStreamClientFactory clientFactory,
                                                               ReaderGroup readerGroup,
                                                               ReaderGroupManager readerGroupManager,
                                                               int parallelSegments) {
    // First, write half of total events before scaling (1/4 in each Stream).
    writeEvents(clientFactory, STREAM_ONE, TOTAL_EVENTS / 4);
    writeEvents(clientFactory, STREAM_TWO, TOTAL_EVENTS / 4);
    log.info("Finished writing events to streams.");

    Map<Stream, StreamCut> initialPosition = new HashMap<>(readerGroup.getStreamCuts());
    log.info("Creating StreamCuts from: {}.", initialPosition);

    // Get StreamCuts for each slice from both Streams at the same time (may be different in each execution).
    List<Map<Stream, StreamCut>> streamSlices = getStreamCutSlices(clientFactory, readerGroup, TOTAL_EVENTS / 2);
    streamSlices.add(0, initialPosition);
    log.info("Finished creating StreamCuts {}.", streamSlices);

    // Ensure that reader groups can correctly read slice by slice from different Streams.
    readSliceBySliceAndVerify(readerGroupManager, clientFactory, parallelSegments, streamSlices);
    log.info("Finished checking sequentially slice by slice.");

    // Perform different combinations of StreamCuts and verify that read event boundaries are still correct.
    combineSlicesAndVerify(readerGroupManager, clientFactory, parallelSegments, streamSlices);
    log.info("Finished checking StreamCut combinations.");

    // Test that a reader group can be reset correctly.
    ReaderGroupConfig firstSliceConfig = ReaderGroupConfig.builder()
            .stream(Stream.of(SCOPE, STREAM_ONE))
            .stream(Stream.of(SCOPE, STREAM_TWO))
            .startingStreamCuts(initialPosition)
            .endingStreamCuts(streamSlices.get(streamSlices.size() - 1)).build();
    readerGroup.resetReaderGroup(firstSliceConfig);
    log.info("Resetting existing reader group {} to stream cut {}.", READER_GROUP, initialPosition);
    final int readEvents = readAllEvents(readerGroupManager, clientFactory, readerGroup.getGroupName(), parallelSegments)
            .stream().map(CompletableFuture::join).reduce(Integer::sum).get();
    assertEquals("Expected read events: ", TOTAL_EVENTS / 2, readEvents);
    return streamSlices;
}
Example #10
Source File: SetupUtils.java From flink-connectors with Apache License 2.0
/**
 * Create a stream writer for writing Integer events.
 *
 * @param streamName Name of the test stream.
 * @return Stream writer instance.
 */
public EventStreamWriter<Integer> getIntegerWriter(final String streamName) {
    Preconditions.checkState(this.started.get(), "Services not yet started");
    Preconditions.checkNotNull(streamName);

    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(this.scope, getClientConfig());
    return clientFactory.createEventWriter(
            streamName,
            new IntegerSerializer(),
            EventWriterConfig.builder().build());
}
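A hypothetical caller of this helper might look like the following; the setupUtils instance, stream name, and payload are placeholders, not part of the connector's source:

// Hypothetical usage of the helper above; "test-stream" is a placeholder name.
EventStreamWriter<Integer> writer = setupUtils.getIntegerWriter("test-stream");
writer.writeEvent("routing-key-0", 42).join(); // write one Integer event and wait for the ack
writer.close();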
Example #11
Source File: ReaderCheckpointTest.java From pravega with Apache License 2.0
private <T extends Serializable> void writeEvents(final String scope, final List<T> events) {
    ClientConfig clientConfig = Utils.buildClientConfig(controllerURI);
    try (EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope, clientConfig);
         EventStreamWriter<T> writer = clientFactory.createEventWriter(STREAM,
                 new JavaSerializer<T>(),
                 EventWriterConfig.builder().build())) {
        for (T event : events) {
            String routingKey = String.valueOf(event);
            log.info("Writing message: {} with routing-key: {} to stream {}", event, routingKey, STREAM);
            writer.writeEvent(routingKey, event);
        }
    }
}
Example #12
Source File: StreamMetadataTasks.java From pravega with Apache License 2.0
@Synchronized
public void initializeStreamWriters(final EventStreamClientFactory clientFactory, final String streamName) {
    this.requestStreamName = streamName;
    requestEventWriterRef.set(clientFactory.createEventWriter(requestStreamName,
            ControllerEventProcessors.CONTROLLER_EVENT_SERIALIZER,
            EventWriterConfig.builder().build()));
    writerInitFuture.complete(null);
}
Example #13
Source File: EndToEndReaderGroupTest.java From pravega with Apache License 2.0
private void writeTestEvent(String scope, String streamName, int eventId) {
    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope,
            ClientConfig.builder().controllerURI(controllerURI).build());
    @Cleanup
    EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName,
            new JavaSerializer<>(),
            EventWriterConfig.builder().build());

    writer.writeEvent("0", Integer.toString(eventId)).join();
}
Example #14
Source File: AutoScaleProcessor.java From pravega with Apache License 2.0
private void bootstrapRequestWriters(EventStreamClientFactory clientFactory, ScheduledExecutorService executor) {
    AtomicReference<EventStreamWriter<AutoScaleEvent>> w = new AtomicReference<>();
    Futures.completeAfter(() -> Retry.indefinitelyWithExpBackoff(100, 10, 10000, this::handleBootstrapException)
                                     .runInExecutor(() -> bootstrapOnce(clientFactory, w), executor)
                                     .thenApply(v -> w.get()),
            writer);
}
Example #15
Source File: AutoScaleProcessor.java From pravega with Apache License 2.0
/**
 * Creates a new instance of the {@link AutoScaleProcessor} class.
 *
 * @param configuration The {@link AutoScalerConfig} to use as configuration.
 * @param clientFactory The {@link EventStreamClientFactory} to use to bootstrap {@link EventStreamWriter} instances.
 * @param executor      The Executor to use for async operations.
 */
@VisibleForTesting
AutoScaleProcessor(@NonNull AutoScalerConfig configuration, EventStreamClientFactory clientFactory,
                   @NonNull ScheduledExecutorService executor) {
    this.configuration = configuration;
    this.writer = new CompletableFuture<>();
    this.clientFactory = clientFactory;
    this.startInitWriter = new AtomicBoolean(false);
    this.cache = CacheBuilder.newBuilder()
            .initialCapacity(INITIAL_CAPACITY)
            .maximumSize(MAX_CACHE_SIZE)
            .expireAfterAccess(configuration.getCacheExpiry().getSeconds(), TimeUnit.SECONDS)
            .removalListener(RemovalListeners.asynchronous((RemovalListener<String, Pair<Long, Long>>) notification -> {
                if (notification.getCause().equals(RemovalCause.EXPIRED)) {
                    triggerScaleDown(notification.getKey(), true);
                }
            }, executor))
            .build();

    // Even if there is no activity, keep cleaning up the cache so that scale down can be triggered.
    // Caches do not perform clean up if there is no activity, because they do not maintain their
    // own background thread.
    this.cacheCleanup = executor.scheduleAtFixedRate(cache::cleanUp, 0,
            configuration.getCacheCleanup().getSeconds(), TimeUnit.SECONDS);
    if (clientFactory != null) {
        bootstrapRequestWriters(clientFactory, executor);
    }
}
Example #16
Source File: AutoScaleMonitor.java From pravega with Apache License 2.0
@VisibleForTesting
public AutoScaleMonitor(@NonNull StreamSegmentStore store,
                        @NonNull EventStreamClientFactory clientFactory,
                        @NonNull AutoScalerConfig configuration) {
    this.executor = ExecutorServiceHelpers.newScheduledThreadPool(configuration.getThreadPoolSize(), "auto-scaler");
    this.processor = new AutoScaleProcessor(configuration, clientFactory, this.executor);
    this.statsRecorder = new SegmentStatsRecorderImpl(this.processor, store, this.executor);
    this.tableSegmentStatsRecorder = new TableSegmentStatsRecorderImpl();
}
Example #17
Source File: ConsoleReader.java From pravega-samples with Apache License 2.0
/**
 * This method continuously performs two tasks: first, it reads events that are being written by the console writer
 * or by any other process to that stream. Second, it creates a new StreamCut after every read event. The new
 * {@link StreamCut} represents the current tail of the {@link Stream}, and it may be used to read events to or from
 * that position in the {@link Stream}.
 */
public void run() {
    final ReaderGroupConfig readerGroupConfig = ReaderGroupConfig.builder().disableAutomaticCheckpoints()
                                                                 .stream(Stream.of(scope, streamName)).build();

    try (ReaderGroupManager readerGroupManager = ReaderGroupManager.withScope(scope, controllerURI);
         EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope,
                 ClientConfig.builder().controllerURI(controllerURI).build())) {

        // Create the ReaderGroup to which readers will belong.
        readerGroupManager.createReaderGroup(readerGroupName, readerGroupConfig);
        @Cleanup
        ReaderGroup readerGroup = readerGroupManager.getReaderGroup(readerGroupName);
        EventStreamReader<String> reader = clientFactory.createReader("backgroundReader", readerGroupName,
                new JavaSerializer<>(), ReaderConfig.builder().build());
        EventRead<String> event;

        // Start the main loop to continuously read and display events written to the scope/stream.
        log.info("Start reading events from {}/{}.", scope, streamName);
        do {
            event = reader.readNextEvent(READER_TIMEOUT_MS);
            if (event.getEvent() != null) {
                // TODO: Problem finding logback.xml in Pravega example applications (Issue #87).
                System.out.println("[BackgroundReader] Read event: " + event.getEvent());
                log.info("[BackgroundReader] Read event: {}.", event.getEvent());
            }

            // Update the StreamCut after every event read, just in case the user wants to use it.
            if (!event.isCheckpoint()) {
                readerGroup.initiateCheckpoint("myCheckpoint" + System.nanoTime(), executor)
                           .thenAccept(checkpoint -> lastStreamCut.set(checkpoint.asImpl().getPositions()));
            }
        } while (!end.get());
    } catch (ReinitializationRequiredException e) {
        // We do not expect this exception from the reader in this situation, so we leave.
        log.error("Unexpected reader re-initialization.");
    }
}
Example #18
Source File: FlinkPravegaWriter.java From flink-connectors with Apache License 2.0
AbstractInternalWriter(EventStreamClientFactory clientFactory, boolean txnWriter) {
    Serializer<T> eventSerializer = new FlinkSerializer<>(serializationSchema);
    EventWriterConfig writerConfig = EventWriterConfig.builder()
            .transactionTimeoutTime(txnLeaseRenewalPeriod)
            .build();
    watermark = Long.MIN_VALUE;
    if (txnWriter) {
        pravegaTxnWriter = clientFactory.createTransactionalEventWriter(writerId(), stream.getStreamName(),
                eventSerializer, writerConfig);
    } else {
        pravegaWriter = clientFactory.createEventWriter(writerId(), stream.getStreamName(),
                eventSerializer, writerConfig);
    }
}
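For reference, the transactional path created above buffers events inside a Pravega transaction and appends them atomically on commit. A minimal sketch of that lifecycle, assuming a factory for a placeholder scope, stream, and writer id (none of these names come from the Flink connector):

// Minimal sketch of using a transactional writer; scope, stream, and writer id are placeholders.
try (EventStreamClientFactory factory = EventStreamClientFactory.withScope("myScope", clientConfig)) {
    TransactionalEventStreamWriter<String> txnWriter = factory.createTransactionalEventWriter(
            "writer-1", "myStream", new JavaSerializer<>(), EventWriterConfig.builder().build());
    Transaction<String> txn = txnWriter.beginTxn();
    try {
        txn.writeEvent("routing-key", "event payload"); // buffered inside the transaction
        txn.commit();                                   // all-or-nothing append to the stream
    } catch (TxnFailedException e) {
        txn.abort();                                    // discard buffered events on failure
    }
    txnWriter.close();
}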
Example #19
Source File: StreamTransactionMetadataTasksTest.java From pravega with Apache License 2.0
private <T extends ControllerEvent> void createEventProcessor(final String readerGroupName,
                                                              final String streamName,
                                                              final EventStreamReader<T> reader,
                                                              final EventStreamWriter<T> writer,
                                                              Supplier<EventProcessor<T>> factory) throws CheckpointStoreException {
    EventStreamClientFactory clientFactory = Mockito.mock(EventStreamClientFactory.class);
    Mockito.when(clientFactory.<T>createReader(anyString(), anyString(), any(), any())).thenReturn(reader);
    Mockito.when(clientFactory.<T>createEventWriter(anyString(), any(), any())).thenReturn(writer);

    ReaderGroup readerGroup = Mockito.mock(ReaderGroup.class);
    Mockito.when(readerGroup.getGroupName()).thenReturn(readerGroupName);

    ReaderGroupManager readerGroupManager = Mockito.mock(ReaderGroupManager.class);

    EventProcessorSystemImpl system = new EventProcessorSystemImpl("system", "host", SCOPE,
            clientFactory, readerGroupManager);

    EventProcessorGroupConfig eventProcessorConfig = EventProcessorGroupConfigImpl.builder()
            .eventProcessorCount(1)
            .readerGroupName(readerGroupName)
            .streamName(streamName)
            .checkpointConfig(CheckpointConfig.periodic(1, 1))
            .build();

    EventProcessorConfig<T> config = EventProcessorConfig.<T>builder()
            .config(eventProcessorConfig)
            .decider(ExceptionHandler.DEFAULT_EXCEPTION_HANDLER)
            .serializer(new EventSerializer<>())
            .supplier(factory)
            .build();

    system.createEventProcessorGroup(config, CheckpointStoreFactory.createInMemoryStore(), executor);
}
Example #20
Source File: ClientReader.java From pravega with Apache License 2.0
/**
 * Creates a new instance of the ClientReader class.
 *
 * @param controllerUri The Controller's URI.
 * @param testConfig    The test configuration to use.
 * @param clientFactory A ClientFactory to use.
 * @param executor      An executor to use for background async operations.
 */
ClientReader(URI controllerUri, TestConfig testConfig, EventStreamClientFactory clientFactory,
             ScheduledExecutorService executor) {
    this.controllerUri = Preconditions.checkNotNull(controllerUri, "controllerUri");
    this.testConfig = Preconditions.checkNotNull(testConfig, "testConfig");
    this.clientFactory = Preconditions.checkNotNull(clientFactory, "clientFactory");
    this.executor = Preconditions.checkNotNull(executor, "executor");
    this.readers = new HashMap<>();
}
Example #21
Source File: StreamCutsTest.java From pravega with Apache License 2.0
/**
 * This method reads stream slices delimited by non-consecutive StreamCuts. For instance, say that we generate 5 cuts
 * in this order: C1, C2, C3, C4, C5. We then read slices of a stream formed like this:
 *
 * [C1, C3), [C1, C4), [C1, C5)
 * [C2, C4), [C2, C5)
 * [C3, C5)
 *
 * Note that all the consecutive slices have been previously tested, so we avoid them to shorten test execution.
 * Moreover, take into account that an increase in the number of slices greatly lengthens execution time.
 *
 * @param manager          Group manager for this scope.
 * @param clientFactory    Client factory to instantiate new readers.
 * @param parallelSegments Number of parallel segments that indicates the number of parallel readers to instantiate.
 * @param streamSlices     StreamCuts lists to be combined and tested via bounded processing.
 */
private void combineSlicesAndVerify(ReaderGroupManager manager, EventStreamClientFactory clientFactory,
                                    int parallelSegments, List<Map<Stream, StreamCut>> streamSlices) {
    for (int i = 0; i < streamSlices.size() - 1; i++) {
        List<Map<Stream, StreamCut>> combinationSlices = new ArrayList<>(streamSlices).subList(i, streamSlices.size());
        ReaderGroupConfig.ReaderGroupConfigBuilder configBuilder = ReaderGroupConfig.builder()
                .stream(Stream.of(SCOPE, STREAM_ONE))
                .stream(Stream.of(SCOPE, STREAM_TWO))
                .startingStreamCuts(combinationSlices.remove(0));

        // Remove the StreamCut contiguous to the starting one, as the slice [CN, CN+1) has already been tested.
        combinationSlices.remove(0);

        // The minimum slice we are going to test is twice the size of CUT_SIZE.
        int readEvents, combinationCutSize = 2;
        for (Map<Stream, StreamCut> endingPoint : combinationSlices) {
            configBuilder = configBuilder.endingStreamCuts(endingPoint);

            // Create a new reader group per stream cut slice and read in parallel only events within the cut.
            final String readerGroupId = READER_GROUP + "CombSize" + String.valueOf(combinationCutSize) + "-" + System.nanoTime();
            manager.createReaderGroup(readerGroupId, configBuilder.build());
            log.debug("Reading events between starting StreamCut {} and ending StreamCut {}",
                    configBuilder.build().getStartingStreamCuts(), endingPoint);
            readEvents = readAllEvents(manager, clientFactory, readerGroupId, parallelSegments).stream()
                    .map(CompletableFuture::join)
                    .reduce(Integer::sum).get();
            log.debug("Read events by group {}: {}.", readerGroupId, readEvents);
            assertEquals("Expected events read: ", combinationCutSize * CUT_SIZE, readEvents);
            combinationCutSize++;
        }
    }
}
Example #22
Source File: ReaderGroupStreamCutUpdateTest.java From pravega with Apache License 2.0
private void writeEvents(EventStreamClientFactory clientFactory, String streamName, int totalEvents, int offset) {
    @Cleanup
    EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName,
            new JavaSerializer<>(),
            EventWriterConfig.builder().build());
    for (int i = offset; i < totalEvents; i++) {
        writer.writeEvent(String.valueOf(i)).join();
        log.info("Writing event: {} to stream {}", i, streamName);
    }
}
Example #23
Source File: BoundedStreamReaderTest.java From pravega with Apache License 2.0
@Test(timeout = 60000)
public void testReaderGroupWithSameBounds() throws Exception {
    createScope(SCOPE);
    createStream(STREAM1);
    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE,
            ClientConfig.builder().controllerURI(controllerUri).build());
    @Cleanup
    EventStreamWriter<String> writer1 = clientFactory.createEventWriter(STREAM1, serializer,
            EventWriterConfig.builder().build());

    // 1. Prep the stream with data: write events with an event size of 30.
    writer1.writeEvent(keyGenerator.get(), getEventData.apply(1)).get();
    writer1.writeEvent(keyGenerator.get(), getEventData.apply(2)).get();

    // 2. Create a StreamCut pointing to offset 30L.
    StreamCut streamCut = getStreamCut(STREAM1, 30L, 0);

    // 3. Create a ReaderGroup where the lower and upper bound are the same.
    @Cleanup
    ReaderGroupManager groupManager = ReaderGroupManager.withScope(SCOPE, controllerUri);
    groupManager.createReaderGroup("group", ReaderGroupConfig
            .builder().disableAutomaticCheckpoints()
            .stream(Stream.of(SCOPE, STREAM1), streamCut, streamCut)
            .build());

    // 4. Create a reader.
    @Cleanup
    EventStreamReader<String> reader = clientFactory.createReader("readerId", "group", serializer,
            ReaderConfig.builder().build());

    // 5. Verify that the configuration is enforced: nothing can be read between identical bounds.
    Assert.assertNull("Null is expected", reader.readNextEvent(1000).getEvent());
}
Example #24
Source File: BatchClientTest.java From pravega with Apache License 2.0
@Test(timeout = 50000)
@SuppressWarnings("deprecation")
public void testBatchClientWithStreamTruncation() throws InterruptedException, ExecutionException {
    StreamManager streamManager = StreamManager.create(clientConfig);
    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE, clientConfig);
    createTestStreamWithEvents(clientFactory);
    log.info("Done creating a test stream with test events");

    @Cleanup
    BatchClientFactory batchClient = BatchClientFactory.withScope(SCOPE, clientConfig);
    log.info("Done creating batch client factory");

    // 1. Create a StreamCut after 2 events (offset = 2 * 30 = 60).
    StreamCut streamCut60L = new StreamCutImpl(Stream.of(SCOPE, STREAM),
            ImmutableMap.of(new Segment(SCOPE, STREAM, 0), 60L));

    // 2. Truncate the stream.
    assertTrue("truncate stream",
            controllerWrapper.getController().truncateStream(SCOPE, STREAM, streamCut60L).join());

    // 3a. Fetch segments using StreamCut.UNBOUNDED.
    ArrayList<SegmentRange> segmentsPostTruncation1 = Lists.newArrayList(
            batchClient.getSegments(Stream.of(SCOPE, STREAM), StreamCut.UNBOUNDED, StreamCut.UNBOUNDED).getIterator());

    // 3b. Fetch segments using the getStreamInfo() API.
    StreamInfo streamInfo = streamManager.getStreamInfo(SCOPE, STREAM);
    ArrayList<SegmentRange> segmentsPostTruncation2 = Lists.newArrayList(
            batchClient.getSegments(Stream.of(SCOPE, STREAM), streamInfo.getHeadStreamCut(),
                    streamInfo.getTailStreamCut()).getIterator());

    // Validate the results.
    validateSegmentCountAndEventCount(batchClient, segmentsPostTruncation1);
    validateSegmentCountAndEventCount(batchClient, segmentsPostTruncation2);
}
Example #25
Source File: BatchClientTest.java From pravega with Apache License 2.0
@Test(expected = TruncatedDataException.class, timeout = 50000)
public void testBatchClientWithStreamTruncationPostGetSegments() throws InterruptedException, ExecutionException {
    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE, clientConfig);
    createTestStreamWithEvents(clientFactory);
    @Cleanup
    BatchClientFactory batchClient = BatchClientFactory.withScope(SCOPE, clientConfig);

    // 1. Fetch segments.
    ArrayList<SegmentRange> segmentsPostTruncation = Lists.newArrayList(
            batchClient.getSegments(Stream.of(SCOPE, STREAM), StreamCut.UNBOUNDED, StreamCut.UNBOUNDED).getIterator());

    // 2. Create a StreamCut at the end of segment 0 (offset = 3 * 30 = 90).
    StreamCut streamCut90L = new StreamCutImpl(Stream.of(SCOPE, STREAM),
            ImmutableMap.of(new Segment(SCOPE, STREAM, 0), 90L));

    // 3. Truncate the stream.
    assertTrue("truncate stream",
            controllerWrapper.getController().truncateStream(SCOPE, STREAM, streamCut90L).join());

    // 4. Use a SegmentRange obtained before truncation.
    SegmentRange s0 = segmentsPostTruncation.stream()
            .filter(segmentRange -> segmentRange.getSegmentId() == 0L).findFirst().get();

    // 5. Read the now non-existent segment; this is expected to throw TruncatedDataException.
    List<String> eventList = new ArrayList<>();
    @Cleanup
    SegmentIterator<String> segmentIterator = batchClient.readSegment(s0, serializer);
    eventList.addAll(Lists.newArrayList(segmentIterator));
}
Example #26
Source File: BatchClientTest.java From pravega with Apache License 2.0
protected void listAndReadSegmentsUsingBatchClient(String scopeName, String streamName, ClientConfig config)
        throws InterruptedException, ExecutionException {
    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scopeName, config);
    createTestStreamWithEvents(clientFactory);
    log.info("Done creating test event stream with test events");

    @Cleanup
    BatchClientFactory batchClient = BatchClientFactory.withScope(scopeName, config);

    // List out all the segments in the stream.
    ArrayList<SegmentRange> segments = Lists.newArrayList(
            batchClient.getSegments(Stream.of(scopeName, streamName), null, null).getIterator());
    assertEquals("Expected number of segments", 6, segments.size());

    // Batch read all events from the stream.
    List<String> batchEventList = new ArrayList<>();
    segments.forEach(segInfo -> {
        @Cleanup
        SegmentIterator<String> segmentIterator = batchClient.readSegment(segInfo, serializer);
        batchEventList.addAll(Lists.newArrayList(segmentIterator));
    });
    assertEquals("Event count", 9, batchEventList.size());

    // Read from a given offset.
    Segment seg0 = new Segment(scopeName, streamName, 0);
    SegmentRange seg0Info = SegmentRangeImpl.builder().segment(seg0).startOffset(60).endOffset(90).build();
    @Cleanup
    SegmentIterator<String> seg0Iterator = batchClient.readSegment(seg0Info, serializer);
    ArrayList<String> dataAtOffset = Lists.newArrayList(seg0Iterator);
    assertEquals(1, dataAtOffset.size());
    assertEquals(DATA_OF_SIZE_30, dataAtOffset.get(0));
}
Example #27
Source File: BatchClientTest.java From pravega with Apache License 2.0
protected void createTestStreamWithEvents(EventStreamClientFactory clientFactory)
        throws InterruptedException, ExecutionException {
    createStream();
    @Cleanup
    EventStreamWriter<String> writer = clientFactory.createEventWriter(STREAM, serializer,
            EventWriterConfig.builder().build());

    // Write events to the stream while it has 1 segment.
    write30ByteEvents(3, writer);

    // Scale up and write events.
    Map<Double, Double> map = new HashMap<>();
    map.put(0.0, 0.33);
    map.put(0.33, 0.66);
    map.put(0.66, 1.0);
    Boolean result;
    assertTrue("Scale up operation",
            controllerWrapper.getController().scaleStream(Stream.of(SCOPE, STREAM),
                    Collections.singletonList(0L), map, executor).getFuture().join());
    write30ByteEvents(3, writer);

    // Scale down and write events.
    map = new HashMap<>();
    map.put(0.0, 0.5);
    map.put(0.5, 1.0);
    result = controllerWrapper.getController().scaleStream(Stream.of(SCOPE, STREAM),
            Arrays.asList(computeSegmentId(1, 1), computeSegmentId(2, 1), computeSegmentId(3, 1)),
            map, executor).getFuture().get();
    assertTrue("Scale down operation result", result);
    write30ByteEvents(3, writer);
}
Example #28
Source File: DebugStreamSegmentsTest.java From pravega with Apache License 2.0
private void randomScaleUpScaleDown(final EventStreamClientFactory clientFactory, final Controller controller) {
    @Cleanup
    EventStreamWriter<AutoScaleEvent> requestStreamWriter = clientFactory.createEventWriter("_requeststream",
            autoScaleEventSerializer, EventWriterConfig.builder().build());

    final Collection<Segment> currentSegments = controller.getCurrentSegments(SCOPE, STREAM).join().getSegments();
    Assert.assertTrue("Current Number of segments cannot be zero", currentSegments.size() > 0);

    // Fetch a random segment.
    final Segment randomSegment = currentSegments.toArray(new Segment[0])[random.nextInt(currentSegments.size())];

    AutoScaleEvent scaleEvent = null;
    if (random.nextBoolean()) {
        // Trigger a random scale up.
        scaleEvent = new AutoScaleEvent(randomSegment.getScope(), randomSegment.getStreamName(),
                randomSegment.getSegmentId(), AutoScaleEvent.UP, System.currentTimeMillis(), 2, false,
                random.nextInt());
    } else {
        // Trigger a random scale down (silent = false).
        scaleEvent = new AutoScaleEvent(randomSegment.getScope(), randomSegment.getStreamName(),
                randomSegment.getSegmentId(), AutoScaleEvent.DOWN, System.currentTimeMillis(), 2, false,
                random.nextInt());
    }
    Futures.getAndHandleExceptions(requestStreamWriter.writeEvent(scaleEvent),
            t -> new RuntimeException("Error while writing scale event", t));
}
Example #29
Source File: ReadWriteUtils.java From pravega with Apache License 2.0
public static List<CompletableFuture<Integer>> readEvents(EventStreamClientFactory client, String rGroup,
                                                          int numReaders, int limit) {
    List<EventStreamReader<String>> readers = new ArrayList<>();
    for (int i = 0; i < numReaders; i++) {
        readers.add(client.createReader(String.valueOf(i), rGroup, new UTF8StringSerializer(),
                ReaderConfig.builder().build()));
    }

    return readers.stream().map(r -> CompletableFuture.supplyAsync(() -> {
        int count = readEvents(r, limit, 50);
        r.close();
        return count;
    })).collect(toList());
}
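The single-reader readEvents(r, limit, 50) overload invoked above is not shown on this page. A plausible sketch, assuming it counts events until the limit is reached or a read returns nothing within the given timeout (an assumption about the utility, not its verbatim source):

// Hypothetical sketch of the single-reader overload; not the verbatim ReadWriteUtils source.
public static int readEvents(EventStreamReader<String> reader, int limit, long timeoutMillis) {
    int count = 0;
    EventRead<String> event = reader.readNextEvent(timeoutMillis);
    while (count < limit && (event.getEvent() != null || event.isCheckpoint())) {
        if (event.getEvent() != null) {
            count++; // count only real events, not checkpoint markers
        }
        event = reader.readNextEvent(timeoutMillis);
    }
    return count;
}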
Example #30
Source File: ReaderGroupTest.java From pravega with Apache License 2.0
public void writeEvents(int eventsToWrite, EventStreamClientFactory clientFactory) {
    @Cleanup
    EventStreamWriter<String> writer = clientFactory.createEventWriter(STREAM_NAME,
            new JavaSerializer<>(),
            EventWriterConfig.builder().build());
    for (int i = 0; i < eventsToWrite; i++) {
        writer.writeEvent(Integer.toString(i), " Event " + i);
    }
    writer.flush();
}