Java Code Examples for io.pravega.client.EventStreamClientFactory#withScope()
The following examples show how to use io.pravega.client.EventStreamClientFactory#withScope().
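All of the examples share one basic pattern: build a ClientConfig pointing at the Pravega controller, create a factory bound to a single scope with EventStreamClientFactory.withScope(), and then create writers and readers from that factory. The following minimal sketch distills that pattern; the controller URI, scope, stream, and routing key are placeholder values, and the scope and stream are assumed to already exist (several examples below show how to create them with StreamManager).

import io.pravega.client.ClientConfig;
import io.pravega.client.EventStreamClientFactory;
import io.pravega.client.stream.EventStreamWriter;
import io.pravega.client.stream.EventWriterConfig;
import io.pravega.client.stream.impl.JavaSerializer;

import java.net.URI;

public class WithScopeSketch {
    public static void main(String[] args) {
        // Placeholder values; substitute your own controller URI, scope, and stream.
        URI controllerURI = URI.create("tcp://localhost:9090");
        ClientConfig clientConfig = ClientConfig.builder().controllerURI(controllerURI).build();

        // The factory is bound to a single scope; closing it (try-with-resources) releases its connections.
        try (EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope("myScope", clientConfig);
             EventStreamWriter<String> writer = clientFactory.createEventWriter("myStream",
                     new JavaSerializer<String>(), EventWriterConfig.builder().build())) {
            // Events with the same routing key go to the same segment, preserving their order.
            writer.writeEvent("myRoutingKey", "hello world").join();
        }
    }
}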
Example 1
Source File: HelloWorldWriter.java From pravega-samples with Apache License 2.0 | 6 votes |
public void run(String routingKey, String message) {
    StreamManager streamManager = StreamManager.create(controllerURI);
    final boolean scopeIsNew = streamManager.createScope(scope);

    StreamConfiguration streamConfig = StreamConfiguration.builder()
            .scalingPolicy(ScalingPolicy.fixed(1))
            .build();
    final boolean streamIsNew = streamManager.createStream(scope, streamName, streamConfig);

    try (EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope,
                 ClientConfig.builder().controllerURI(controllerURI).build());
         EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName,
                 new JavaSerializer<String>(),
                 EventWriterConfig.builder().build())) {
        System.out.format("Writing message: '%s' with routing-key: '%s' to stream '%s / %s'%n",
                message, routingKey, scope, streamName);
        final CompletableFuture<Void> writeFuture = writer.writeEvent(routingKey, message);
    }
}
Example 2
Source File: ReaderCheckpointTest.java From pravega with Apache License 2.0 | 6 votes |
private Checkpoint createCheckPointAndVerify(final ReaderGroup readerGroup, final String checkPointName) {
    log.info("Create and verify check point {}", checkPointName);
    String readerId = "checkPointReader";
    CompletableFuture<Checkpoint> checkpoint = null;
    final ClientConfig clientConfig = Utils.buildClientConfig(controllerURI);

    try (EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE_1, clientConfig);
         EventStreamReader<Integer> reader = clientFactory.createReader(readerId, READER_GROUP_NAME,
                 new JavaSerializer<Integer>(), readerConfig)) {

        checkpoint = readerGroup.initiateCheckpoint(checkPointName, executor); // create checkpoint

        // verify checkpoint event.
        EventRead<Integer> event = reader.readNextEvent(READ_TIMEOUT);
        assertTrue("Read for Checkpoint event", (event != null) && (event.isCheckpoint()));
        assertEquals("CheckPoint Name", checkPointName, event.getCheckpointName());
    }
    return checkpoint.join();
}
Example 3
Source File: DelegationTokenTest.java From pravega with Apache License 2.0 | 5 votes |
private void writeAnEvent(int tokenTtlInSeconds) throws ExecutionException, InterruptedException {
    ClusterWrapper pravegaCluster = new ClusterWrapper(true, tokenTtlInSeconds);
    try {
        pravegaCluster.initialize();

        String scope = "testscope";
        String streamName = "teststream";
        int numSegments = 1;
        String message = "test message";

        ClientConfig clientConfig = ClientConfig.builder()
                .controllerURI(URI.create(pravegaCluster.controllerUri()))
                .credentials(new DefaultCredentials("1111_aaaa", "admin"))
                .build();
        log.debug("Done creating client config.");

        createScopeStream(scope, streamName, numSegments, clientConfig);

        @Cleanup
        EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope, clientConfig);

        //@Cleanup
        EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName,
                new JavaSerializer<String>(),
                EventWriterConfig.builder().build());

        // Note: A TokenException is thrown here if token verification fails on the server.
        writer.writeEvent(message).get();

        log.debug("Done writing message '{}' to stream '{} / {}'", message, scope, streamName);
    } finally {
        pravegaCluster.close();
    }
}
Example 4
Source File: BatchClientTest.java From pravega with Apache License 2.0 | 5 votes |
@Test(expected = TruncatedDataException.class, timeout = 50000)
public void testBatchClientWithStreamTruncationPostGetSegments() throws InterruptedException, ExecutionException {
    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE, clientConfig);
    createTestStreamWithEvents(clientFactory);

    @Cleanup
    BatchClientFactory batchClient = BatchClientFactory.withScope(SCOPE, clientConfig);

    // 1. Fetch Segments.
    ArrayList<SegmentRange> segmentsPostTruncation = Lists.newArrayList(
            batchClient.getSegments(Stream.of(SCOPE, STREAM), StreamCut.UNBOUNDED, StreamCut.UNBOUNDED).getIterator());

    // 2. Create a StreamCut at the end of segment 0 (offset = 3 * 30 = 90).
    StreamCut streamCut90L = new StreamCutImpl(Stream.of(SCOPE, STREAM),
            ImmutableMap.of(new Segment(SCOPE, STREAM, 0), 90L));

    // 3. Truncate stream.
    assertTrue("truncate stream",
            controllerWrapper.getController().truncateStream(SCOPE, STREAM, streamCut90L).join());

    // 4. Use SegmentRange obtained before truncation.
    SegmentRange s0 = segmentsPostTruncation.stream()
            .filter(segmentRange -> segmentRange.getSegmentId() == 0L).findFirst().get();

    // 5. Read non existent segment.
    List<String> eventList = new ArrayList<>();
    @Cleanup
    SegmentIterator<String> segmentIterator = batchClient.readSegment(s0, serializer);
    eventList.addAll(Lists.newArrayList(segmentIterator));
}
Example 5
Source File: SimpleReader.java From pravega-samples with Apache License 2.0 | 5 votes |
public void run() {
    setRunning(true);
    final String readerGroup = UUID.randomUUID().toString().replace("-", "");
    final ReaderGroupConfig readerGroupConfig = ReaderGroupConfig.builder()
            .stream(Stream.of(scope, streamName))
            .build();
    try (ReaderGroupManager readerGroupManager = ReaderGroupManager.withScope(scope, controllerURI)) {
        readerGroupManager.createReaderGroup(readerGroup, readerGroupConfig);
    }

    try (EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope,
                 ClientConfig.builder().controllerURI(controllerURI).build());
         EventStreamReader<T> reader = clientFactory.createReader("reader", readerGroup,
                 serializer, ReaderConfig.builder().build())) {
        while (isRunning()) {
            try {
                EventRead<T> event = reader.readNextEvent(READER_TIMEOUT_MS);
                T eventData = event.getEvent();
                if (eventData != null) {
                    onNext.accept(event.getEvent());
                }
            } catch (ReinitializationRequiredException e) {
                onError.accept(e);
            }
        }
    }
}
Example 6
Source File: HelloWorldReader.java From pravega-samples with Apache License 2.0 | 5 votes |
public void run() {
    StreamManager streamManager = StreamManager.create(controllerURI);
    final boolean scopeIsNew = streamManager.createScope(scope);

    StreamConfiguration streamConfig = StreamConfiguration.builder()
            .scalingPolicy(ScalingPolicy.fixed(1))
            .build();
    final boolean streamIsNew = streamManager.createStream(scope, streamName, streamConfig);

    final String readerGroup = UUID.randomUUID().toString().replace("-", "");
    final ReaderGroupConfig readerGroupConfig = ReaderGroupConfig.builder()
            .stream(Stream.of(scope, streamName))
            .build();
    try (ReaderGroupManager readerGroupManager = ReaderGroupManager.withScope(scope, controllerURI)) {
        readerGroupManager.createReaderGroup(readerGroup, readerGroupConfig);
    }

    try (EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope,
                 ClientConfig.builder().controllerURI(controllerURI).build());
         EventStreamReader<String> reader = clientFactory.createReader("reader", readerGroup,
                 new JavaSerializer<String>(), ReaderConfig.builder().build())) {
        System.out.format("Reading all the events from %s/%s%n", scope, streamName);
        EventRead<String> event = null;
        do {
            try {
                event = reader.readNextEvent(READER_TIMEOUT_MS);
                if (event.getEvent() != null) {
                    System.out.format("Read event '%s'%n", event.getEvent());
                }
            } catch (ReinitializationRequiredException e) {
                // There are certain circumstances where the reader needs to be reinitialized.
                e.printStackTrace();
            }
        } while (event.getEvent() != null);
        System.out.format("No more events from %s/%s%n", scope, streamName);
    }
}
Example 7
Source File: PravegaFixedSegmentsOutputFormat.java From pravega-samples with Apache License 2.0 | 5 votes |
@Override
public RecordWriter<String, V> getRecordWriter(TaskAttemptContext context) throws IOException, InterruptedException {
    Configuration conf = context.getConfiguration();
    final String scopeName = Optional.ofNullable(conf.get(OUTPUT_SCOPE_NAME)).orElseThrow(() ->
            new IOException("The input scope name must be configured (" + OUTPUT_SCOPE_NAME + ")"));
    final String streamName = Optional.ofNullable(conf.get(OUTPUT_STREAM_NAME)).orElseThrow(() ->
            new IOException("The input stream name must be configured (" + OUTPUT_STREAM_NAME + ")"));
    final URI controllerURI = Optional.ofNullable(conf.get(OUTPUT_URI_STRING)).map(URI::create).orElseThrow(() ->
            new IOException("The Pravega controller URI must be configured (" + OUTPUT_URI_STRING + ")"));
    final String deserializerClassName = Optional.ofNullable(conf.get(OUTPUT_DESERIALIZER)).orElseThrow(() ->
            new IOException("The event deserializer must be configured (" + OUTPUT_DESERIALIZER + ")"));
    final int segments = Integer.parseInt(conf.get(OUTPUT_STREAM_SEGMENTS, "3"));

    StreamManager streamManager = StreamManager.create(controllerURI);
    streamManager.createScope(scopeName);

    StreamConfiguration streamConfig = StreamConfiguration.builder()
            .scalingPolicy(ScalingPolicy.fixed(segments))
            .build();
    streamManager.createStream(scopeName, streamName, streamConfig);

    EventStreamClientFactory clientFactory = (externalClientFactory != null)
            ? externalClientFactory
            : EventStreamClientFactory.withScope(scopeName,
                    ClientConfig.builder().controllerURI(controllerURI).build());

    Serializer<V> deserializer;
    try {
        Class<?> deserializerClass = Class.forName(deserializerClassName);
        deserializer = (Serializer<V>) deserializerClass.newInstance();
    } catch (ClassNotFoundException | InstantiationException | IllegalAccessException e) {
        log.error("Exception when creating deserializer: {}", e);
        throw new IOException("Unable to create the event deserializer (" + deserializerClassName + ")", e);
    }

    EventStreamWriter<V> writer = clientFactory.createEventWriter(streamName, deserializer,
            EventWriterConfig.builder().build());
    return new PravegaOutputRecordWriter<V>(writer);
}
Example 8
Source File: StreamMetricsTest.java From pravega with Apache License 2.0 | 5 votes |
@Test(timeout = 30000)
public void testTransactionMetrics() throws Exception {
    String txScopeName = "scopeTx";
    String txStreamName = "streamTx";

    controllerWrapper.getControllerService().createScope(txScopeName).get();
    if (!controller.createStream(txScopeName, txStreamName, config).get()) {
        log.error("Stream {} for tx testing already existed, exiting", txScopeName + "/" + txStreamName);
        return;
    }

    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(txScopeName,
            ClientConfig.builder().controllerURI(URI.create("tcp://localhost:" + controllerPort)).build());
    @Cleanup
    TransactionalEventStreamWriter<String> writer = clientFactory.createTransactionalEventWriter(
            Stream.of(txScopeName, txStreamName).getStreamName(),
            new JavaSerializer<>(),
            EventWriterConfig.builder().build());

    Transaction<String> transaction = writer.beginTxn();
    assertEquals(1, (long) MetricRegistryUtils.getCounter(MetricsNames.CREATE_TRANSACTION,
            streamTags(txScopeName, txStreamName)).count());

    transaction.writeEvent("Test");
    transaction.flush();
    transaction.commit();

    AssertExtensions.assertEventuallyEquals(true,
            () -> transaction.checkStatus().equals(Transaction.Status.COMMITTED), 10000);
    AssertExtensions.assertEventuallyEquals(true,
            () -> MetricRegistryUtils.getCounter(MetricsNames.COMMIT_TRANSACTION,
                    streamTags(txScopeName, txStreamName)) != null, 10000);
    assertEquals(1, (long) MetricRegistryUtils.getCounter(MetricsNames.COMMIT_TRANSACTION,
            streamTags(txScopeName, txStreamName)).count());

    Transaction<String> transaction2 = writer.beginTxn();
    transaction2.writeEvent("Test");
    transaction2.abort();

    AssertExtensions.assertEventuallyEquals(true,
            () -> transaction2.checkStatus().equals(Transaction.Status.ABORTED), 10000);
    AssertExtensions.assertEventuallyEquals(true,
            () -> MetricRegistryUtils.getCounter(MetricsNames.ABORT_TRANSACTION,
                    streamTags(txScopeName, txStreamName)) != null, 10000);
    assertEquals(1, (long) MetricRegistryUtils.getCounter(MetricsNames.ABORT_TRANSACTION,
            streamTags(txScopeName, txStreamName)).count());
}
Example 9
Source File: DataProducer.java From pravega-samples with Apache License 2.0 | 4 votes |
public static void main(String[] args) throws InterruptedException {
    ParameterTool params = ParameterTool.fromArgs(args);

    // The writer will contact the Pravega controller to get information about segments.
    URI pravegaControllerURI = URI.create(params.get(Constants.CONTROLLER_ADDRESS_PARAM, Constants.CONTROLLER_ADDRESS));
    final int numEvents = params.getInt(Constants.NUM_EVENTS_PARAM, Constants.NUM_EVENTS);

    // StreamManager helps us to easily manage streams and scopes.
    StreamManager streamManager = StreamManager.create(pravegaControllerURI);

    // A scope is a namespace that will be used to group streams (e.g., like dirs and files).
    streamManager.createScope(Constants.DEFAULT_SCOPE);

    // Here we configure the new stream (e.g., name, scope, scaling policy, retention policy).
    StreamConfiguration streamConfiguration = StreamConfiguration.builder()
            .scalingPolicy(ScalingPolicy.fixed(Constants.PARALLELISM))
            .build();

    // Create a Pravega stream to write data (if it does not exist yet).
    streamManager.createStream(Constants.DEFAULT_SCOPE, Constants.PRODUCER_STREAM, streamConfiguration);

    // Create the client factory to instantiate writers and readers.
    try (EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(Constants.DEFAULT_SCOPE,
            ClientConfig.builder().controllerURI(pravegaControllerURI).build())) {

        // Create a writer to write events in the stream.
        EventStreamWriter<Tuple2<Integer, Double>> writer = clientFactory.createEventWriter(Constants.PRODUCER_STREAM,
                new JavaSerializer<>(), EventWriterConfig.builder().build());

        for (double i = 0; i < numEvents * EVENT_VALUE_INCREMENT; i += EVENT_VALUE_INCREMENT) {
            // Write an event for each sensor.
            for (int sensorId = 0; sensorId < Constants.PARALLELISM; sensorId++) {
                // Different starting values per sensor.
                final Tuple2<Integer, Double> value = new Tuple2<>(sensorId, Math.sin(i + sensorId));
                writer.writeEvent(String.valueOf(sensorId), value);
                LOG.warn("Writing event: {} (routing key {}).", value, sensorId);
            }
            writer.flush();
            Thread.sleep(WRITER_SLEEP_MS);
        }
        writer.close();
    }
    System.exit(0);
}
Example 10
Source File: SliceProcessor.java From pravega-samples with Apache License 2.0 | 4 votes |
public static void main(String[] args) throws Exception {
    // Initialize the parameter utility tool in order to retrieve input parameters.
    ParameterTool params = ParameterTool.fromArgs(args);

    // This process will contact the Pravega controller to get information about streams.
    URI pravegaControllerURI = URI.create(params.get(Constants.CONTROLLER_ADDRESS_PARAM, Constants.CONTROLLER_ADDRESS));
    PravegaConfig pravegaConfig = PravegaConfig
            .fromParams(params)
            .withControllerURI(pravegaControllerURI)
            .withDefaultScope(Constants.DEFAULT_SCOPE);

    // Create the scope if it is not present.
    StreamManager streamManager = StreamManager.create(pravegaControllerURI);
    streamManager.createScope(Constants.DEFAULT_SCOPE);

    // We will read from the stream slices published by StreamBookmarker.
    ReaderGroupConfig readerGroupConfig = ReaderGroupConfig.builder()
            .stream(Stream.of(Constants.DEFAULT_SCOPE, Constants.STREAMCUTS_STREAM))
            .build();

    // Instantiate the reader group manager to create the reader group and the client factory to create readers.
    try (ReaderGroupManager readerGroupManager = ReaderGroupManager.withScope(Constants.DEFAULT_SCOPE, pravegaControllerURI);
         EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(Constants.DEFAULT_SCOPE,
                 ClientConfig.builder().controllerURI(pravegaControllerURI).build())) {

        // Create the reader group to read the stream slices.
        readerGroupManager.createReaderGroup(READER_GROUP_NAME, readerGroupConfig);
        EventStreamReader<SensorStreamSlice> sliceReader = clientFactory.createReader("sliceReader",
                READER_GROUP_NAME, new JavaSerializer<>(), ReaderConfig.builder().build());

        // The application locally executes bounded batch jobs for every slice received from StreamBookmarker.
        // Note that this is only to simplify the demo and keep the three processes working in a loop; in a real
        // setting, we could pass a representation of the SensorStreamSlice object as an input argument for a batch job.
        EventRead<SensorStreamSlice> sliceToAnalyze;
        do {
            sliceToAnalyze = sliceReader.readNextEvent(READER_TIMEOUT_MS);
            // If we got a new stream slice to process, run a new batch job on it.
            if (sliceToAnalyze.getEvent() != null) {
                LOG.warn("Running batch job for slice: {}.", sliceToAnalyze.getEvent());
                triggerBatchJobOnSlice(pravegaConfig, sliceToAnalyze.getEvent());
            }
        } while (sliceToAnalyze.isCheckpoint() || sliceToAnalyze.getEvent() != null);
        sliceReader.close();
    }
}
Example 11
Source File: PravegaWatermarkIngestion.java From pravega-samples with Apache License 2.0 | 4 votes |
public static void main(String[] args) throws InterruptedException {
    ParameterTool params = ParameterTool.fromArgs(args);

    // The writer will contact the Pravega controller to get information about segments.
    URI pravegaControllerURI = URI.create(params.get(Constants.CONTROLLER_ADDRESS_PARAM, Constants.CONTROLLER_ADDRESS));

    // StreamManager helps us to easily manage streams and scopes.
    StreamManager streamManager = StreamManager.create(pravegaControllerURI);

    // A scope is a namespace that will be used to group streams (e.g., like dirs and files).
    streamManager.createScope(Constants.DEFAULT_SCOPE);

    PravegaConfig pravegaConfig = PravegaConfig.fromDefaults()
            .withControllerURI(pravegaControllerURI)
            .withDefaultScope(Constants.DEFAULT_SCOPE);

    // Here we configure the new stream (e.g., scaling policy, retention policy).
    StreamConfiguration streamConfiguration = StreamConfiguration.builder()
            .scalingPolicy(ScalingPolicy.fixed(Constants.PARALLELISM))
            .build();

    // Create a Pravega stream to write data (if it does not exist yet).
    streamManager.createStream(Constants.DEFAULT_SCOPE, Constants.RAW_DATA_STREAM, streamConfiguration);

    // Create the client factory to instantiate writers and readers.
    try (EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(Constants.DEFAULT_SCOPE,
            pravegaConfig.getClientConfig())) {

        // Create a writer to write events in the stream.
        EventStreamWriter<SensorData> writer = clientFactory.createEventWriter("watermarking-events",
                Constants.RAW_DATA_STREAM, new JavaSerializer<>(), EventWriterConfig.builder().build());

        for (int i = 1; i <= Constants.EVENTS_NUMBER; i++) {
            // Write an event for each sensor.
            long eventTime = Constants.STARTING_EVENT_TIME + i * Constants.EVENT_TIME_PERIOD;
            for (int sensorId = 0; sensorId < Constants.SENSOR_NUMBER; sensorId++) {
                // Different starting values per sensor.
                final SensorData value = new SensorData(sensorId,
                        Math.sin(i * Constants.EVENT_VALUE_INCREMENT + sensorId), eventTime);
                writer.writeEvent(String.valueOf(sensorId), value);
                LOG.warn("Writing event: {} (routing key {}).", value, sensorId);
            }
            writer.flush();
            Thread.sleep(Constants.WRITER_SLEEP_MS);
            // Notify the event time every 20 events.
            if (i % 20 == 0) {
                writer.noteTime(eventTime);
            }
        }
        writer.close();
    }
    System.exit(0);
}
Example 12
Source File: StreamSeekTest.java From pravega with Apache License 2.0 | 4 votes |
@Test(timeout = 50000)
public void testStreamSeek() throws Exception {
    createScope(SCOPE);
    createStream(STREAM1);
    createStream(STREAM2);

    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE,
            ClientConfig.builder().controllerURI(controllerUri).build());
    @Cleanup
    EventStreamWriter<String> writer1 = clientFactory.createEventWriter(STREAM1, serializer,
            EventWriterConfig.builder().build());

    @Cleanup
    ReaderGroupManager groupManager = ReaderGroupManager.withScope(SCOPE, controllerUri);
    groupManager.createReaderGroup("group", ReaderGroupConfig.builder()
            .disableAutomaticCheckpoints()
            .groupRefreshTimeMillis(0)
            .stream(Stream.of(SCOPE, STREAM1))
            .stream(Stream.of(SCOPE, STREAM2))
            .build());
    @Cleanup
    ReaderGroup readerGroup = groupManager.getReaderGroup("group");

    // Prep the stream with data.
    // 1. Write two events with event size of 30.
    writer1.writeEvent(keyGenerator.get(), getEventData.apply(1)).get();
    writer1.writeEvent(keyGenerator.get(), getEventData.apply(2)).get();

    // 2. Scale stream.
    Map<Double, Double> newKeyRanges = new HashMap<>();
    newKeyRanges.put(0.0, 0.33);
    newKeyRanges.put(0.33, 0.66);
    newKeyRanges.put(0.66, 1.0);
    scaleStream(STREAM1, newKeyRanges);

    // 3. Write three events with event size of 30.
    writer1.writeEvent(keyGenerator.get(), getEventData.apply(3)).get();
    writer1.writeEvent(keyGenerator.get(), getEventData.apply(4)).get();
    writer1.writeEvent(keyGenerator.get(), getEventData.apply(5)).get();

    // Create a reader.
    @Cleanup
    EventStreamReader<String> reader = clientFactory.createReader("readerId", "group", serializer,
            ReaderConfig.builder().build());

    // Offset of a streamCut is always set to zero.
    Map<Stream, StreamCut> streamCut1 = readerGroup.getStreamCuts(); // Stream cut 1
    readAndVerify(reader, 1, 2);
    assertNull(reader.readNextEvent(100).getEvent()); // Sees the segments are empty prior to scaling
    readerGroup.initiateCheckpoint("cp1", executor); // Checkpoint to move past the scale
    readAndVerify(reader, 3, 4, 5); // Old segments are released and new ones can be read
    Map<Stream, StreamCut> streamCut2 = readerGroup.getStreamCuts(); // Stream cut 2

    readerGroup.resetReaderGroup(ReaderGroupConfig.builder().startFromStreamCuts(streamCut1).build()); // Reset the readers to offset 0.
    verifyReinitializationRequiredException(reader);

    @Cleanup
    EventStreamReader<String> reader1 = clientFactory.createReader("readerId", "group", serializer,
            ReaderConfig.builder().build());

    // Verify that we are at streamCut1.
    readAndVerify(reader1, 1, 2);

    readerGroup.resetReaderGroup(ReaderGroupConfig.builder().startFromStreamCuts(streamCut2).build()); // Reset readers to post-scale offset 0.
    verifyReinitializationRequiredException(reader1);

    @Cleanup
    EventStreamReader<String> reader2 = clientFactory.createReader("readerId", "group", serializer,
            ReaderConfig.builder().build());

    // Verify that we are at streamCut2.
    readAndVerify(reader2, 3, 4, 5);
}
Example 13
Source File: StreamCutsTest.java From pravega with Apache License 2.0 | 4 votes |
/**
 * This test verifies the correct operation of readers using StreamCuts. Concretely, the test creates two streams
 * with different numbers of segments and writes some events (TOTAL_EVENTS / 2) in them. Then, the test creates a
 * list of StreamCuts that encompasses both streams every CUT_SIZE events. The test asserts that new groups of
 * readers can be initialized at these sequential StreamCut intervals and that only CUT_SIZE events are read. Also,
 * the test checks the correctness of different combinations of StreamCuts that have not been sequentially created.
 * After creating StreamCuts and testing the correctness of reads, the test also checks resetting a reader group to
 * a specific initial read point. The previous process is repeated twice, before and after scaling streams, to test
 * whether StreamCuts work correctly under scaling events (thus writing TOTAL_EVENTS). Finally, this test checks
 * reading different StreamCut combinations in both streams for all events (encompassing events before and after
 * scaling).
 */
@Test
public void streamCutsTest() {
    final ClientConfig clientConfig = Utils.buildClientConfig(controllerURI);
    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE, clientConfig);
    @Cleanup
    ReaderGroupManager readerGroupManager = ReaderGroupManager.withScope(SCOPE, clientConfig);
    readerGroupManager.createReaderGroup(READER_GROUP, ReaderGroupConfig.builder()
            .stream(Stream.of(SCOPE, STREAM_ONE))
            .stream(Stream.of(SCOPE, STREAM_TWO)).build());
    @Cleanup
    ReaderGroup readerGroup = readerGroupManager.getReaderGroup(READER_GROUP);

    // Perform write of events, slice-by-slice StreamCuts test and combinations StreamCuts test.
    log.info("Write, slice by slice and combinations test before scaling.");
    final int parallelismBeforeScale = RG_PARALLELISM_ONE + RG_PARALLELISM_TWO;
    List<Map<Stream, StreamCut>> slicesBeforeScale = writeEventsAndCheckSlices(clientFactory, readerGroup,
            readerGroupManager, parallelismBeforeScale);

    // Now, we perform a manual scale on both streams and wait until it occurs.
    CompletableFuture<Boolean> scaleStreamOne = scaleStream(SCOPE, STREAM_ONE, RG_PARALLELISM_ONE * 2, executor);
    checkScaleStatus(scaleStreamOne);

    // Perform the same test again on the stream segments after scaling.
    final int parallelSegmentsAfterScale = RG_PARALLELISM_ONE * 2 + RG_PARALLELISM_TWO;
    final String newReaderGroupName = READER_GROUP + "new";
    final Map<Stream, StreamCut> streamCutBeforeScale = slicesBeforeScale.get(slicesBeforeScale.size() - 1);
    readerGroupManager.createReaderGroup(newReaderGroupName, ReaderGroupConfig.builder()
            .stream(Stream.of(SCOPE, STREAM_ONE))
            .stream(Stream.of(SCOPE, STREAM_TWO))
            .startingStreamCuts(streamCutBeforeScale).build());
    @Cleanup
    ReaderGroup newReaderGroup = readerGroupManager.getReaderGroup(newReaderGroupName);
    log.info("Checking slices again starting from {}.", streamCutBeforeScale);
    List<Map<Stream, StreamCut>> slicesAfterScale = writeEventsAndCheckSlices(clientFactory, newReaderGroup,
            readerGroupManager, parallelSegmentsAfterScale);

    // Perform combinations including StreamCuts before and after the scale event.
    slicesAfterScale.remove(0);
    slicesBeforeScale.addAll(slicesAfterScale);
    log.info("Performing combinations in the whole stream.");
    combineSlicesAndVerify(readerGroupManager, clientFactory, parallelSegmentsAfterScale, slicesBeforeScale);
    log.info("All events correctly read from StreamCut slices on multiple Streams. StreamCuts test passed.");
}
Example 14
Source File: SetupUtils.java From flink-connectors with Apache License 2.0 | 4 votes |
/**
 * Creates an {@link EventStreamClientFactory} for this cluster and scope.
 */
public EventStreamClientFactory newClientFactory() {
    return EventStreamClientFactory.withScope(this.scope, getClientConfig());
}
Example 15
Source File: StreamCutsExample.java From pravega-samples with Apache License 2.0 | 4 votes |
/**
 * A {@link StreamCut} is a collection of offsets, one for each open segment of a set of {@link Stream}s, which
 * indicates an event boundary. With a {@link StreamCut}, users can instruct readers to read from and/or up to a
 * particular event boundary (e.g., read events from 100 to 200, events created since Tuesday) on multiple
 * {@link Stream}s. To this end, Pravega allows us to create {@link StreamCut}s while readers are reading. In this
 * method, we create two {@link StreamCut}s for a {@link Stream} according to the initial and final event
 * indexes passed by parameter.
 *
 * @param streamName    Name of the {@link Stream} from which {@link StreamCut}s will be created.
 * @param iniEventIndex Index of the initial boundary for the {@link Stream} slice to process.
 * @param endEventIndex Index of the final boundary for the {@link Stream} slice to process.
 * @return Initial and final {@link Stream} boundaries represented as {@link StreamCut}s.
 */
public List<StreamCut> createStreamCutsByIndexFor(String streamName, int iniEventIndex, int endEventIndex) {
    // Create the StreamCuts for the streams.
    final List<StreamCut> streamCuts = new ArrayList<>();
    final String randomId = String.valueOf(new Random(System.nanoTime()).nextInt());

    // Free resources after execution.
    try (ReaderGroupManager manager = ReaderGroupManager.withScope(scope, controllerURI);
         EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope,
                 ClientConfig.builder().controllerURI(controllerURI).build())) {

        // Create a reader group and a reader to read from the stream.
        final String readerGroupName = streamName + randomId;
        ReaderGroupConfig config = ReaderGroupConfig.builder().stream(Stream.of(scope, streamName)).build();
        manager.createReaderGroup(readerGroupName, config);
        @Cleanup
        ReaderGroup readerGroup = manager.getReaderGroup(readerGroupName);
        @Cleanup
        EventStreamReader<String> reader = clientFactory.createReader(randomId, readerGroup.getGroupName(),
                new JavaSerializer<>(), ReaderConfig.builder().build());

        // Read the stream and create the StreamCuts during the read process.
        int eventIndex = 0;
        EventRead<String> event;
        do {
            // Here is where we create a StreamCut that points to the event indicated by the user.
            if (eventIndex == iniEventIndex || eventIndex == endEventIndex) {
                reader.close();
                streamCuts.add(readerGroup.getStreamCuts().get(Stream.of(scope, streamName)));
                reader = clientFactory.createReader(randomId, readerGroup.getGroupName(),
                        new JavaSerializer<>(), ReaderConfig.builder().build());
            }
            event = reader.readNextEvent(1000);
            eventIndex++;
        } while (event.isCheckpoint() || event.getEvent() != null);

        // If there is only the initial StreamCut, this means that the final one is the tail of the stream.
        if (streamCuts.size() == 1) {
            streamCuts.add(StreamCut.UNBOUNDED);
        }
    } catch (ReinitializationRequiredException e) {
        // We do not expect this exception from the reader in this situation, so we leave.
        log.error("Non-expected reader re-initialization.");
    }
    return streamCuts;
}
Example 16
Source File: EndToEndReaderGroupTest.java From pravega with Apache License 2.0 | 4 votes |
@Test(timeout = 30000)
public void testGenerateStreamCuts() throws Exception {
    final Stream stream = Stream.of(SCOPE, STREAM);
    final String group = "group";

    createScope(SCOPE);
    createStream(SCOPE, STREAM, ScalingPolicy.fixed(1));

    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE,
            ClientConfig.builder().controllerURI(controllerURI).build());
    @Cleanup
    EventStreamWriter<String> writer = clientFactory.createEventWriter(STREAM, serializer,
            EventWriterConfig.builder().build());

    // Prep the stream with data.
    // 1. Write events with event size of 30.
    writer.writeEvent(randomKeyGenerator.get(), getEventData.apply(1)).join();
    writer.writeEvent(randomKeyGenerator.get(), getEventData.apply(2)).join();
    writer.writeEvent(randomKeyGenerator.get(), getEventData.apply(3)).join();
    writer.writeEvent(randomKeyGenerator.get(), getEventData.apply(4)).join();

    @Cleanup
    ReaderGroupManager groupManager = ReaderGroupManager.withScope(SCOPE, controllerURI);
    groupManager.createReaderGroup(group, ReaderGroupConfig.builder()
            .disableAutomaticCheckpoints()
            .groupRefreshTimeMillis(1000)
            .stream(stream)
            .build());
    ReaderGroup readerGroup = groupManager.getReaderGroup(group);

    // Create a reader.
    @Cleanup
    EventStreamReader<String> reader = clientFactory.createReader("readerId", group, serializer,
            ReaderConfig.builder().build());
    readAndVerify(reader, 1);

    @Cleanup("shutdown")
    InlineExecutor backgroundExecutor = new InlineExecutor();
    CompletableFuture<Map<Stream, StreamCut>> sc = readerGroup.generateStreamCuts(backgroundExecutor);

    // The reader group state will be updated after 1 second.
    TimeUnit.SECONDS.sleep(1);

    EventRead<String> data = reader.readNextEvent(15000);
    assertTrue(Futures.await(sc)); // Wait until the StreamCut is obtained.

    // The expected segment 0 offset is 30L.
    Map<Segment, Long> expectedOffsetMap = ImmutableMap.of(getSegment(0, 0), 30L);
    Map<Stream, StreamCut> scMap = sc.join();

    assertEquals("StreamCut for a single stream expected", 1, scMap.size());
    assertEquals("StreamCut pointing to offset 30L expected", new StreamCutImpl(stream, expectedOffsetMap),
            scMap.get(stream));
}
Example 17
Source File: StreamsAndScopesManagementTest.java From pravega with Apache License 2.0 | 4 votes |
private void testCreateSealAndDeleteStreams(String scope) {
    final ClientConfig clientConfig = Utils.buildClientConfig(controllerURI);
    for (int j = 1; j <= NUM_STREAMS; j++) {
        final String stream = String.valueOf(j);
        StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(j)).build();

        // Create Stream with nonexistent scope, which should not be successful.
        log.info("Creating a stream in a deliberately nonexistent scope nonexistentScope/{}.", stream);
        assertThrows(RuntimeException.class, () -> streamManager.createStream("nonexistentScope", stream,
                StreamConfiguration.builder().build()));

        long iniTime = System.nanoTime();
        log.info("Creating stream {}/{}.", scope, stream);
        assertTrue("Creating stream", streamManager.createStream(scope, stream, config));
        controllerPerfStats.get("createStreamMs").add(timeDiffInMs(iniTime));

        // Update the configuration of the stream by doubling the number of segments.
        config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(j * 2)).build();
        iniTime = System.nanoTime();
        assertTrue(streamManager.updateStream(scope, stream, config));
        controllerPerfStats.get("updateStreamMs").add(timeDiffInMs(iniTime));

        // Perform tests on empty and non-empty streams.
        if (j % 2 == 0) {
            log.info("Writing events in stream {}/{}.", scope, stream);
            @Cleanup
            EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope, clientConfig);
            writeEvents(clientFactory, stream, NUM_EVENTS);
        }

        // Update the configuration of the stream.
        config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(j * 2)).build();
        assertTrue(streamManager.updateStream(scope, stream, config));

        // Attempting to delete non-empty scope and non-sealed stream.
        assertThrows(RuntimeException.class, () -> streamManager.deleteScope(scope));
        assertThrows(RuntimeException.class, () -> streamManager.deleteStream(scope, stream));

        // Seal and delete stream.
        log.info("Attempting to seal and delete stream {}/{}.", scope, stream);
        iniTime = System.nanoTime();
        assertTrue(streamManager.sealStream(scope, stream));
        controllerPerfStats.get("sealStreamMs").add(timeDiffInMs(iniTime));
        iniTime = System.nanoTime();
        assertTrue(streamManager.deleteStream(scope, stream));
        controllerPerfStats.get("deleteStreamMs").add(timeDiffInMs(iniTime));

        // Seal and delete already sealed/deleted streams.
        log.info("Sealing and deleting an already deleted stream {}/{}.", scope, stream);
        assertThrows(RuntimeException.class, () -> streamManager.sealStream(scope, stream));
        assertFalse(streamManager.deleteStream(scope, stream));
    }
}
Example 18
Source File: EndToEndTruncationTest.java From pravega with Apache License 2.0 | 4 votes |
/**
 * This test verifies that truncation works when specifying an offset that applies to multiple segments. To this
 * end, the test first writes a set of events on a Stream (with multiple segments) and truncates it at a specified
 * offset (truncatedEvents). The test asserts that readers get a TruncatedDataException after truncation and then
 * read (only) the remaining events that have not been truncated.
 */
@Test(timeout = 600000)
public void testParallelSegmentOffsetTruncation() {
    final String scope = "truncationTests";
    final String streamName = "testParallelSegmentOffsetTruncation";
    final int parallelism = 2;
    final int totalEvents = 100;
    final int truncatedEvents = 25;

    StreamConfiguration streamConf = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(parallelism)).build();
    @Cleanup
    StreamManager streamManager = StreamManager.create(controllerURI);
    @Cleanup
    ReaderGroupManager groupManager = ReaderGroupManager.withScope(scope, controllerURI);
    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope,
            ClientConfig.builder().controllerURI(controllerURI).build());
    streamManager.createScope(scope);

    // Test truncation in new and re-created streams.
    for (int i = 0; i < 2; i++) {
        final String readerGroupName = "RGTestParallelSegmentOffsetTruncation" + i;
        streamManager.createStream(scope, streamName, streamConf);
        groupManager.createReaderGroup(readerGroupName, ReaderGroupConfig.builder()
                .disableAutomaticCheckpoints()
                .stream(Stream.of(scope, streamName)).build());
        ReaderGroup readerGroup = groupManager.getReaderGroup(readerGroupName);

        // Write events to the Stream.
        writeEvents(clientFactory, streamName, totalEvents);

        // Instantiate readers to consume from the Stream up to truncatedEvents.
        List<CompletableFuture<Integer>> futures = ReadWriteUtils.readEvents(clientFactory, readerGroupName,
                parallelism, truncatedEvents);
        Futures.allOf(futures).join();

        // Perform truncation on the stream segment.
        Checkpoint cp = readerGroup.initiateCheckpoint("myCheckpoint" + i, executor).join();
        StreamCut streamCut = cp.asImpl().getPositions().values().iterator().next();
        assertTrue(streamManager.truncateStream(scope, streamName, streamCut));

        // Just after the truncation, trying to read the whole stream should raise a TruncatedDataException.
        final String newGroupName = readerGroupName + "new";
        groupManager.createReaderGroup(newGroupName,
                ReaderGroupConfig.builder().stream(Stream.of(scope, streamName)).build());
        futures = readEvents(clientFactory, newGroupName, parallelism);
        Futures.allOf(futures).join();
        assertEquals("Expected read events: ", totalEvents - (truncatedEvents * parallelism),
                (int) futures.stream().map(CompletableFuture::join).reduce((a, b) -> a + b).get());
        assertTrue(streamManager.sealStream(scope, streamName));
        assertTrue(streamManager.deleteStream(scope, streamName));
    }
}
Example 19
Source File: SecureReader.java From pravega-samples with Apache License 2.0 | 4 votes |
public void read() throws ReinitializationRequiredException {
    /*
     * Note about setting the client config for HTTPS:
     * - The client config below is configured to use an optional truststore. The truststore is expected to be
     *   the certificate of the certification authority (CA) that was used to sign the server certificates.
     *   If this is null or empty, the default JVM trust store is used. In this demo, we use a provided
     *   "cert.pem" as the CA certificate, which is also provided on the server-side. If the cluster uses a
     *   different CA (which it should), use that CA's certificate as the truststore instead.
     *
     * - Also, the client config below disables host name verification. If the cluster's server certificates
     *   have DNS names / IP addresses of the servers specified in them, you may turn this on. In a production
     *   deployment, it is recommended to keep this on.
     *
     * Note about setting the client config for auth:
     * - The client config below is configured with an object of the DefaultCredentials class. The user name
     *   and password arguments passed to the object represent the credentials used for authentication
     *   and authorization. The assumption we are making here is that the username is valid on the server,
     *   the password is correct, and the username has all the permissions necessary for performing the
     *   subsequent operations.
     */
    ClientConfig clientConfig = ClientConfig.builder()
            .controllerURI(this.controllerURI) // e.g., "tls://localhost:9090"

            // TLS-related client-side configuration
            .trustStore(this.truststorePath)
            .validateHostName(this.validateHostName)

            // Auth-related client-side configuration
            .credentials(new DefaultCredentials(this.password, this.username))
            .build();
    System.out.println("Done creating a client config.");

    // Everything below depicts the usual flow of reading events. All client-side security configuration is
    // done through the ClientConfig object as shown above.

    EventStreamClientFactory clientFactory = null;
    ReaderGroupManager readerGroupManager = null;
    EventStreamReader<String> reader = null;
    try {
        ReaderGroupConfig readerGroupConfig = ReaderGroupConfig.builder()
                .stream(Stream.of(this.scope, this.stream))
                .disableAutomaticCheckpoints()
                .build();
        System.out.println("Done creating a reader group config with specified scope: [" + this.scope
                + "] and stream name: [" + this.stream + "].");

        String readerGroupName = UUID.randomUUID().toString().replace("-", "");
        readerGroupManager = ReaderGroupManager.withScope(this.scope, clientConfig);
        readerGroupManager.createReaderGroup(readerGroupName, readerGroupConfig);
        System.out.println("Done creating a reader group with specified name and config.");

        clientFactory = EventStreamClientFactory.withScope(this.scope, clientConfig);
        System.out.println("Done creating a client factory with the specified scope and client config.");

        reader = clientFactory.createReader("readerId", readerGroupName,
                new JavaSerializer<>(), ReaderConfig.builder().build());
        System.out.println("Done creating a reader.");

        String readMessage = reader.readNextEvent(2000).getEvent();
        System.out.println("Done reading an event: [" + readMessage + "].");
    } finally {
        if (reader != null) reader.close();
        if (clientFactory != null) clientFactory.close();
        if (readerGroupManager != null) readerGroupManager.close();
    }
    System.err.println("All done with reading! Exiting...");
}
Example 20
Source File: WatermarkingTest.java From pravega with Apache License 2.0 | 4 votes |
@Test(timeout = 60000)
public void progressingWatermarkWithWriterTimeouts() throws Exception {
    String scope = "Timeout";
    String streamName = "Timeout";
    int numSegments = 1;

    URI controllerUri = URI.create("tcp://localhost:" + controllerPort);
    ClientConfig clientConfig = ClientConfig.builder().controllerURI(controllerUri).build();

    @Cleanup
    StreamManager streamManager = StreamManager.create(controllerUri);
    assertNotNull(streamManager);
    streamManager.createScope(scope);
    streamManager.createStream(scope, streamName, StreamConfiguration.builder()
            .scalingPolicy(ScalingPolicy.fixed(numSegments))
            .build());

    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope, clientConfig);
    @Cleanup
    SynchronizerClientFactory syncClientFactory = SynchronizerClientFactory.withScope(scope, clientConfig);

    String markStream = NameUtils.getMarkStreamForStream(streamName);
    RevisionedStreamClient<Watermark> watermarkReader = syncClientFactory.createRevisionedStreamClient(markStream,
            new WatermarkSerializer(), SynchronizerConfig.builder().build());

    LinkedBlockingQueue<Watermark> watermarks = new LinkedBlockingQueue<>();
    AtomicBoolean stopFlag = new AtomicBoolean(false);
    fetchWatermarks(watermarkReader, watermarks, stopFlag);

    // Create two writers, write an event with each, and call noteTime for each writer.
    @Cleanup
    EventStreamWriter<String> writer1 = clientFactory.createEventWriter(streamName,
            new JavaSerializer<>(), EventWriterConfig.builder().build());
    writer1.writeEvent("1").get();
    writer1.noteTime(100L);

    @Cleanup
    EventStreamWriter<String> writer2 = clientFactory.createEventWriter(streamName,
            new JavaSerializer<>(), EventWriterConfig.builder().build());
    writer2.writeEvent("2").get();
    writer2.noteTime(102L);

    // writer1 and writer2 should result in two watermarks with the following time bounds
    // (matching the assertions below): 1: 100L-102L, 2: 102L-102L.
    // The first writer should then time out and be discarded, but the second writer should continue to be active
    // as its time is higher than the first watermark. This should result in a second watermark being emitted.
    AssertExtensions.assertEventuallyEquals(true, () -> watermarks.size() == 2, 100000);
    Watermark watermark1 = watermarks.poll();
    Watermark watermark2 = watermarks.poll();
    assertEquals(100L, watermark1.getLowerTimeBound());
    assertEquals(102L, watermark1.getUpperTimeBound());
    assertEquals(102L, watermark2.getLowerTimeBound());
    assertEquals(102L, watermark2.getUpperTimeBound());

    // The stream cut should be the same.
    assertTrue(watermark2.getStreamCut().entrySet().stream()
            .allMatch(x -> watermark1.getStreamCut().get(x.getKey()).equals(x.getValue())));

    // Bring back writer1 and post an event with a note time smaller than the current watermark.
    writer1.writeEvent("3").get();
    writer1.noteTime(101L);

    // No watermark should be emitted.
    Watermark nullMark = watermarks.poll(10, TimeUnit.SECONDS);
    assertNull(nullMark);
}