org.apache.flink.util.SerializedValue Java Examples
The following examples show how to use
org.apache.flink.util.SerializedValue.
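Before the project-specific examples, here is a minimal, self-contained sketch of the basic SerializedValue round trip: wrapping a serializable object, inspecting its serialized bytes, and deserializing it again with a class loader. The payload class and values are made up purely for illustration.

import org.apache.flink.util.SerializedValue;

import java.util.HashMap;
import java.util.Map;

public class SerializedValueRoundTrip {

    public static void main(String[] args) throws Exception {
        // any java.io.Serializable value can be wrapped; a HashMap is used here only as an example
        Map<String, Long> payload = new HashMap<>();
        payload.put("answer", 42L);

        // the constructor serializes eagerly and throws IOException for non-serializable values
        SerializedValue<Map<String, Long>> serialized = new SerializedValue<>(payload);

        // the raw bytes are available, e.g. for size checks or for shipping over the wire
        int size = serialized.getByteArray().length;
        System.out.println("serialized size: " + size + " bytes");

        // deserialization needs a class loader that can resolve the payload's classes
        Map<String, Long> copy = serialized.deserializeValue(SerializedValueRoundTrip.class.getClassLoader());
        System.out.println("round-tripped value: " + copy);
    }
}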
Example #1
Source File: AkkaRpcActor.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
private void sendAsyncResponse(CompletableFuture<?> asyncResponse, String methodName) {
    final ActorRef sender = getSender();
    Promise.DefaultPromise<Object> promise = new Promise.DefaultPromise<>();

    asyncResponse.whenComplete(
        (value, throwable) -> {
            if (throwable != null) {
                promise.failure(throwable);
            } else {
                if (isRemoteSender(sender)) {
                    Either<SerializedValue<?>, AkkaRpcException> serializedResult =
                        serializeRemoteResultAndVerifySize(value, methodName);

                    if (serializedResult.isLeft()) {
                        promise.success(serializedResult.left());
                    } else {
                        promise.failure(serializedResult.right());
                    }
                } else {
                    promise.success(value);
                }
            }
        });

    Patterns.pipe(promise.future(), getContext().dispatcher()).to(sender);
}
Example #2
Source File: JobCheckpointingSettings.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
public JobCheckpointingSettings(
        List<JobVertexID> verticesToTrigger,
        List<JobVertexID> verticesToAcknowledge,
        List<JobVertexID> verticesToConfirm,
        CheckpointCoordinatorConfiguration checkpointCoordinatorConfiguration,
        @Nullable SerializedValue<StateBackend> defaultStateBackend,
        @Nullable SerializedValue<MasterTriggerRestoreHook.Factory[]> masterHooks) {

    this.verticesToTrigger = requireNonNull(verticesToTrigger);
    this.verticesToAcknowledge = requireNonNull(verticesToAcknowledge);
    this.verticesToConfirm = requireNonNull(verticesToConfirm);
    this.checkpointCoordinatorConfiguration = Preconditions.checkNotNull(checkpointCoordinatorConfiguration);
    this.defaultStateBackend = defaultStateBackend;
    this.masterHooks = masterHooks;
}
Example #3
Source File: AccumulatorHelper.java From flink with Apache License 2.0 | 6 votes |
/**
 * Takes the serialized accumulator results and tries to deserialize them using the provided
 * class loader.
 *
 * @param serializedAccumulators The serialized accumulator results.
 * @param loader The class loader to use.
 * @return The deserialized accumulator results.
 * @throws IOException
 * @throws ClassNotFoundException
 */
public static Map<String, OptionalFailure<Object>> deserializeAccumulators(
        Map<String, SerializedValue<OptionalFailure<Object>>> serializedAccumulators,
        ClassLoader loader) throws IOException, ClassNotFoundException {

    if (serializedAccumulators == null || serializedAccumulators.isEmpty()) {
        return Collections.emptyMap();
    }

    Map<String, OptionalFailure<Object>> accumulators = new HashMap<>(serializedAccumulators.size());

    for (Map.Entry<String, SerializedValue<OptionalFailure<Object>>> entry : serializedAccumulators.entrySet()) {
        OptionalFailure<Object> value = null;
        if (entry.getValue() != null) {
            value = entry.getValue().deserializeValue(loader);
        }

        accumulators.put(entry.getKey(), value);
    }

    return accumulators;
}
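For context, a caller-side sketch of how this helper might be used follows. The accumulator name and value are hypothetical, and it assumes the OptionalFailure.of(...) factory for wrapping a successful result; getUnchecked() is the accessor also used in Example #22 below.

import org.apache.flink.api.common.accumulators.AccumulatorHelper;
import org.apache.flink.util.OptionalFailure;
import org.apache.flink.util.SerializedValue;

import java.util.HashMap;
import java.util.Map;

public class AccumulatorDeserializationSketch {

    public static void main(String[] args) throws Exception {
        // hypothetical accumulator result, as it might arrive from the runtime in serialized form
        Map<String, SerializedValue<OptionalFailure<Object>>> serializedAccumulators = new HashMap<>();
        SerializedValue<OptionalFailure<Object>> serializedCount =
            new SerializedValue<>(OptionalFailure.of((Object) 1234L));
        serializedAccumulators.put("record-count", serializedCount);

        // deserialize with a class loader that can see the user classes; the application class loader suffices here
        Map<String, OptionalFailure<Object>> accumulators = AccumulatorHelper.deserializeAccumulators(
            serializedAccumulators,
            AccumulatorDeserializationSketch.class.getClassLoader());

        // getUnchecked() returns the value or rethrows a recorded failure as an unchecked exception
        long recordCount = (long) accumulators.get("record-count").getUnchecked();
        System.out.println("record-count = " + recordCount);
    }
}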
Example #4
Source File: AbstractFetcherWatermarksTest.java From flink with Apache License 2.0 | 6 votes |
TestFetcher(
        SourceContext<T> sourceContext,
        Map<KafkaTopicPartition, Long> assignedPartitionsWithStartOffsets,
        SerializedValue<WatermarkStrategy<T>> watermarkStrategy,
        ProcessingTimeService processingTimeProvider,
        long autoWatermarkInterval) throws Exception {
    super(
            sourceContext,
            assignedPartitionsWithStartOffsets,
            watermarkStrategy,
            processingTimeProvider,
            autoWatermarkInterval,
            TestFetcher.class.getClassLoader(),
            new UnregisteredMetricsGroup(),
            false);
}
Example #5
Source File: FlinkPravegaReaderTest.java From flink-connectors with Apache License 2.0 | 6 votes |
/**
 * Creates a {@link TestableFlinkPravegaReader} with event time and watermarking.
 */
private static TestableFlinkPravegaReader<Integer> createReaderWithWatermark(AssignerWithTimeWindows<Integer> assignerWithTimeWindows) {
    ClientConfig clientConfig = ClientConfig.builder().build();
    ReaderGroupConfig rgConfig = ReaderGroupConfig.builder().stream(SAMPLE_STREAM).build();
    boolean enableMetrics = true;

    try {
        ClosureCleaner.clean(assignerWithTimeWindows, ExecutionConfig.ClosureCleanerLevel.RECURSIVE, true);
        SerializedValue<AssignerWithTimeWindows<Integer>> serializedAssigner = new SerializedValue<>(assignerWithTimeWindows);
        return new TestableFlinkPravegaReader<>(
                "hookUid", clientConfig, rgConfig, SAMPLE_SCOPE, GROUP_NAME, DESERIALIZATION_SCHEMA,
                serializedAssigner, READER_TIMEOUT, CHKPT_TIMEOUT, enableMetrics);
    } catch (IOException e) {
        throw new IllegalArgumentException("The given assigner is not serializable", e);
    }
}
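The clean-then-serialize pattern above appears in many connectors: strip the user function of non-serializable enclosing references first, then wrap it, and treat a serialization failure as a caller error. A generic sketch of that pattern, with a hypothetical user function type standing in for the connector-specific assigner:

import org.apache.flink.api.common.ExecutionConfig;
import org.apache.flink.api.java.ClosureCleaner;
import org.apache.flink.util.SerializedValue;

import java.io.IOException;
import java.io.Serializable;

public class FunctionShippingSketch {

    /** Hypothetical stand-in for a user-provided function that must travel with the job. */
    public interface UserFunction extends Serializable {
        long extractTimestamp(String record);
    }

    public static SerializedValue<UserFunction> prepareForShipping(UserFunction function) {
        try {
            // remove non-serializable outer references (e.g. from anonymous classes)
            ClosureCleaner.clean(function, ExecutionConfig.ClosureCleanerLevel.RECURSIVE, true);
            // serialize eagerly so a non-serializable function fails fast at job construction time
            return new SerializedValue<>(function);
        } catch (IOException e) {
            throw new IllegalArgumentException("The given function is not serializable", e);
        }
    }
}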
Example #6
Source File: RestClusterClientTest.java From flink with Apache License 2.0 | 6 votes |
@Override
@SuppressWarnings("unchecked")
protected CompletableFuture<ClientCoordinationResponseBody> handleRequest(
        @Nonnull HandlerRequest<ClientCoordinationRequestBody, ClientCoordinationMessageParameters> request,
        @Nonnull DispatcherGateway gateway) throws RestHandlerException {
    try {
        TestCoordinationRequest req =
                (TestCoordinationRequest) request
                        .getRequestBody()
                        .getSerializedCoordinationRequest()
                        .deserializeValue(getClass().getClassLoader());
        TestCoordinationResponse resp = new TestCoordinationResponse(req.payload);
        return CompletableFuture.completedFuture(
                new ClientCoordinationResponseBody(
                        new SerializedValue<>(resp)));
    } catch (Exception e) {
        return FutureUtils.completedExceptionally(e);
    }
}
Example #7
Source File: AkkaRpcActor.java From flink with Apache License 2.0 | 6 votes |
private Either<SerializedValue<?>, AkkaRpcException> serializeRemoteResultAndVerifySize(Object result, String methodName) {
    try {
        SerializedValue<?> serializedResult = new SerializedValue<>(result);

        long resultSize = serializedResult.getByteArray().length;

        if (resultSize > maximumFramesize) {
            return Either.Right(new AkkaRpcException(
                "The method " + methodName + "'s result size " + resultSize
                    + " exceeds the maximum size " + maximumFramesize + " ."));
        } else {
            return Either.Left(serializedResult);
        }
    } catch (IOException e) {
        return Either.Right(new AkkaRpcException(
            "Failed to serialize the result for RPC call : " + methodName + '.', e));
    }
}
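Outside of the Akka RPC internals, the same idea applies wherever a serialized payload must respect a transport limit: serialize first, then compare the byte-array length against the configured maximum. A minimal sketch with a made-up limit (real frame sizes come from configuration):

import org.apache.flink.util.SerializedValue;

import java.io.IOException;
import java.io.Serializable;

public class PayloadSizeCheck {

    // hypothetical transport limit, for illustration only
    private static final long MAX_PAYLOAD_BYTES = 10 * 1024 * 1024;

    public static <T extends Serializable> SerializedValue<T> serializeWithLimit(T value) throws IOException {
        SerializedValue<T> serialized = new SerializedValue<>(value);
        long size = serialized.getByteArray().length;

        if (size > MAX_PAYLOAD_BYTES) {
            throw new IOException(
                "Serialized payload of " + size + " bytes exceeds the limit of " + MAX_PAYLOAD_BYTES + " bytes.");
        }
        return serialized;
    }
}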
Example #8
Source File: TaskDeploymentDescriptorTest.java From flink with Apache License 2.0 | 6 votes |
@Test
public void testOffLoadedAndNonOffLoadedPayload() {
    final TaskDeploymentDescriptor taskDeploymentDescriptor = createTaskDeploymentDescriptor(
        new TaskDeploymentDescriptor.NonOffloaded<>(serializedJobInformation),
        new TaskDeploymentDescriptor.Offloaded<>(new PermanentBlobKey()));

    SerializedValue<JobInformation> actualSerializedJobInformation = taskDeploymentDescriptor.getSerializedJobInformation();
    assertThat(actualSerializedJobInformation, is(serializedJobInformation));

    try {
        taskDeploymentDescriptor.getSerializedTaskInformation();
        fail("Expected to fail since the task information should be offloaded.");
    } catch (IllegalStateException expected) {
        // expected
    }
}
Example #9
Source File: ExecutionJobVertex.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
public Either<SerializedValue<TaskInformation>, PermanentBlobKey> getTaskInformationOrBlobKey() throws IOException {
    // only one thread should offload the task information, so let's also let only one thread
    // serialize the task information!
    synchronized (stateMonitor) {
        if (taskInformationOrBlobKey == null) {
            final BlobWriter blobWriter = graph.getBlobWriter();

            final TaskInformation taskInformation = new TaskInformation(
                jobVertex.getID(),
                jobVertex.getName(),
                parallelism,
                maxParallelism,
                jobVertex.getInvokableClassName(),
                jobVertex.getConfiguration());

            taskInformationOrBlobKey = BlobWriter.serializeAndTryOffload(
                taskInformation,
                getJobId(),
                blobWriter);
        }

        return taskInformationOrBlobKey;
    }
}
Example #10
Source File: AbstractFetcher.java From flink with Apache License 2.0 | 6 votes |
/**
 * Shortcut variant of {@link #createPartitionStateHolders(Map, int, SerializedValue, SerializedValue, ClassLoader)}
 * that uses the same offset for all partitions when creating their state holders.
 */
private List<KafkaTopicPartitionState<KPH>> createPartitionStateHolders(
        List<KafkaTopicPartition> partitions,
        long initialOffset,
        int timestampWatermarkMode,
        SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic,
        SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated,
        ClassLoader userCodeClassLoader) throws IOException, ClassNotFoundException {

    Map<KafkaTopicPartition, Long> partitionsToInitialOffset = new HashMap<>(partitions.size());
    for (KafkaTopicPartition partition : partitions) {
        partitionsToInitialOffset.put(partition, initialOffset);
    }

    return createPartitionStateHolders(
            partitionsToInitialOffset,
            timestampWatermarkMode,
            watermarksPeriodic,
            watermarksPunctuated,
            userCodeClassLoader);
}
Example #11
Source File: TaskDeploymentDescriptorTest.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
@Test
public void testOffLoadedAndNonOffLoadedPayload() {
    final TaskDeploymentDescriptor taskDeploymentDescriptor = createTaskDeploymentDescriptor(
        new TaskDeploymentDescriptor.NonOffloaded<>(serializedJobInformation),
        new TaskDeploymentDescriptor.Offloaded<>(new PermanentBlobKey()));

    SerializedValue<JobInformation> actualSerializedJobInformation = taskDeploymentDescriptor.getSerializedJobInformation();
    assertThat(actualSerializedJobInformation, is(serializedJobInformation));

    try {
        taskDeploymentDescriptor.getSerializedTaskInformation();
        fail("Expected to fail since the task information should be offloaded.");
    } catch (IllegalStateException expected) {
        // expected
    }
}
Example #12
Source File: AbstractFetcher.java From flink with Apache License 2.0 | 6 votes |
/**
 * Shortcut variant of {@link #createPartitionStateHolders(Map, int, SerializedValue, ClassLoader)}
 * that uses the same offset for all partitions when creating their state holders.
 */
private List<KafkaTopicPartitionState<T, KPH>> createPartitionStateHolders(
        List<KafkaTopicPartition> partitions,
        long initialOffset,
        int timestampWatermarkMode,
        SerializedValue<WatermarkStrategy<T>> watermarkStrategy,
        ClassLoader userCodeClassLoader) throws IOException, ClassNotFoundException {

    Map<KafkaTopicPartition, Long> partitionsToInitialOffset = new HashMap<>(partitions.size());
    for (KafkaTopicPartition partition : partitions) {
        partitionsToInitialOffset.put(partition, initialOffset);
    }

    return createPartitionStateHolders(
            partitionsToInitialOffset,
            timestampWatermarkMode,
            watermarkStrategy,
            userCodeClassLoader);
}
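On the caller side, the watermark strategy handed to such a fetcher is typically wrapped once when the consumer is configured. A small sketch of that wrapping step, using a built-in strategy purely as an example:

import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.util.SerializedValue;

public class WatermarkStrategyWrappingSketch {

    public static void main(String[] args) throws Exception {
        // any WatermarkStrategy works here; the monotonous-timestamps strategy is just an example
        WatermarkStrategy<Long> strategy = WatermarkStrategy.forMonotonousTimestamps();

        // wrapping it in a SerializedValue lets the runtime ship it to the task and
        // deserialize it later with the user-code class loader
        SerializedValue<WatermarkStrategy<Long>> serializedStrategy = new SerializedValue<>(strategy);

        System.out.println("serialized watermark strategy: " + serializedStrategy.getByteArray().length + " bytes");
    }
}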
Example #13
Source File: OperatorEventDispatcherImpl.java From flink with Apache License 2.0 | 6 votes |
void dispatchEventToHandlers(OperatorID operatorID, SerializedValue<OperatorEvent> serializedEvent) throws FlinkException {
    final OperatorEvent evt;
    try {
        evt = serializedEvent.deserializeValue(classLoader);
    } catch (IOException | ClassNotFoundException e) {
        throw new FlinkException("Could not deserialize operator event", e);
    }

    final OperatorEventHandler handler = handlers.get(operatorID);
    if (handler != null) {
        handler.handleOperatorEvent(evt);
    } else {
        throw new FlinkException("Operator not registered for operator events");
    }
}
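This dispatcher is the receiving half of the pattern: events are serialized by the sender (see the sendEventToCoordinator example from OperatorEventDispatcherImpl further down) and deserialized with the receiver's class loader. A self-contained sketch of that round trip, with a hypothetical concrete event class:

import org.apache.flink.runtime.operators.coordination.OperatorEvent;
import org.apache.flink.util.SerializedValue;

public class OperatorEventRoundTripSketch {

    /** Hypothetical concrete event; OperatorEvent itself is a serializable interface. */
    public static class ProgressEvent implements OperatorEvent {
        private static final long serialVersionUID = 1L;

        final long watermark;

        ProgressEvent(long watermark) {
            this.watermark = watermark;
        }
    }

    public static void main(String[] args) throws Exception {
        // sender side: wrap the event so it can cross the RPC boundary as bytes
        SerializedValue<OperatorEvent> serializedEvent = new SerializedValue<>(new ProgressEvent(42L));

        // receiver side: deserialize with a class loader that knows the event class
        OperatorEvent event = serializedEvent.deserializeValue(OperatorEventRoundTripSketch.class.getClassLoader());
        System.out.println("received event of type " + event.getClass().getSimpleName());
    }
}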
Example #14
Source File: CoordinatorEventsExactlyOnceITCase.java From flink with Apache License 2.0 | 6 votes |
private static JobVertex buildJobVertex(String name, int numEvents, int delay, String accName) throws IOException {
    final JobVertex vertex = new JobVertex(name);
    final OperatorID opId = OperatorID.fromJobVertexID(vertex.getID());

    vertex.setParallelism(1);
    vertex.setInvokableClass(EventCollectingTask.class);
    vertex.getConfiguration().setString(ACC_NAME, accName);

    final OperatorCoordinator.Provider provider = new OperatorCoordinator.Provider() {

        @Override
        public OperatorID getOperatorId() {
            return opId;
        }

        @Override
        public OperatorCoordinator create(OperatorCoordinator.Context context) {
            return new EventSendingCoordinator(context, numEvents, delay);
        }
    };

    vertex.addOperatorCoordinator(new SerializedValue<>(provider));

    return vertex;
}
Example #15
Source File: BlobWriter.java From flink with Apache License 2.0 | 6 votes |
/**
 * Serializes the given value and offloads it to the BlobServer if its size exceeds the minimum
 * offloading size of the BlobServer.
 *
 * @param value to serialize
 * @param jobId to which the value belongs.
 * @param blobWriter to use to offload the serialized value
 * @param <T> type of the value to serialize
 * @return Either the serialized value or the stored blob key
 * @throws IOException if the data cannot be serialized
 */
static <T> Either<SerializedValue<T>, PermanentBlobKey> serializeAndTryOffload(
        T value,
        JobID jobId,
        BlobWriter blobWriter) throws IOException {
    Preconditions.checkNotNull(value);
    Preconditions.checkNotNull(jobId);
    Preconditions.checkNotNull(blobWriter);

    final SerializedValue<T> serializedValue = new SerializedValue<>(value);

    if (serializedValue.getByteArray().length < blobWriter.getMinOffloadingSize()) {
        return Either.Left(new SerializedValue<>(value));
    } else {
        try {
            final PermanentBlobKey permanentBlobKey = blobWriter.putPermanent(jobId, serializedValue.getByteArray());

            return Either.Right(permanentBlobKey);
        } catch (IOException e) {
            LOG.warn("Failed to offload value {} for job {} to BLOB store.", value, jobId, e);

            return Either.Left(serializedValue);
        }
    }
}
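A consumer of serializeAndTryOffload has to handle both branches of the returned Either: use the inline serialized value directly, or fetch the offloaded bytes from the permanent blob store by key (as Example #9 and the checkJobOffloaded test below do). A hedged sketch of the branching, with the blob-store retrieval left out because it is component-specific:

import org.apache.flink.runtime.blob.PermanentBlobKey;
import org.apache.flink.types.Either;
import org.apache.flink.util.SerializedValue;

public class OffloadResultConsumerSketch {

    /**
     * Hypothetical consumer: returns the value if it was kept inline, otherwise reports
     * the blob key under which the serialized bytes were stored.
     */
    public static <T> T resolveInlineValue(
            Either<SerializedValue<T>, PermanentBlobKey> valueOrBlobKey,
            ClassLoader classLoader) throws Exception {

        if (valueOrBlobKey.isLeft()) {
            // small payload: the serialized value travelled inline and can be deserialized directly
            return valueOrBlobKey.left().deserializeValue(classLoader);
        } else {
            // large payload: only the key is available here; the bytes would have to be
            // fetched from the permanent blob store first (component-specific, omitted)
            throw new IllegalStateException("Value was offloaded to blob " + valueOrBlobKey.right());
        }
    }
}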
Example #16
Source File: ExecutionGraph.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
/**
 * This constructor is for tests only, because it sets default values for many fields.
 */
@VisibleForTesting
ExecutionGraph(
        ScheduledExecutorService futureExecutor,
        Executor ioExecutor,
        JobID jobId,
        String jobName,
        Configuration jobConfig,
        SerializedValue<ExecutionConfig> serializedConfig,
        Time timeout,
        RestartStrategy restartStrategy,
        SlotProvider slotProvider) throws IOException {

    this(
        new JobInformation(
            jobId,
            jobName,
            serializedConfig,
            jobConfig,
            Collections.emptyList(),
            Collections.emptyList()),
        futureExecutor,
        ioExecutor,
        timeout,
        restartStrategy,
        slotProvider);
}
Example #17
Source File: JobCheckpointingSettings.java From flink with Apache License 2.0 | 5 votes |
public JobCheckpointingSettings(
        List<JobVertexID> verticesToTrigger,
        List<JobVertexID> verticesToAcknowledge,
        List<JobVertexID> verticesToConfirm,
        CheckpointCoordinatorConfiguration checkpointCoordinatorConfiguration,
        @Nullable SerializedValue<StateBackend> defaultStateBackend) {

    this(
        verticesToTrigger,
        verticesToAcknowledge,
        verticesToConfirm,
        checkpointCoordinatorConfiguration,
        defaultStateBackend,
        null);
}
Example #18
Source File: AbstractFetcherTest.java From flink with Apache License 2.0 | 5 votes |
@Test
public void testPeriodicWatermarksWithNoSubscribedPartitionsShouldYieldNoWatermarks() throws Exception {
    final String testTopic = "test topic name";
    Map<KafkaTopicPartition, Long> originalPartitions = new HashMap<>();

    TestSourceContext<Long> sourceContext = new TestSourceContext<>();

    TestProcessingTimeService processingTimeProvider = new TestProcessingTimeService();

    TestFetcher<Long> fetcher = new TestFetcher<>(
            sourceContext,
            originalPartitions,
            new SerializedValue<AssignerWithPeriodicWatermarks<Long>>(new PeriodicTestExtractor()),
            null, /* punctuated watermarks assigner*/
            processingTimeProvider,
            10);

    processingTimeProvider.setCurrentTime(10);
    // no partitions; when the periodic watermark emitter fires, no watermark should be emitted
    assertFalse(sourceContext.hasWatermark());

    // counter-test that when the fetcher does actually have partitions,
    // when the periodic watermark emitter fires again, a watermark really is emitted
    fetcher.addDiscoveredPartitions(Collections.singletonList(new KafkaTopicPartition(testTopic, 0)));
    fetcher.emitRecord(100L, fetcher.subscribedPartitionStates().get(0), 3L);
    processingTimeProvider.setCurrentTime(20);
    assertEquals(100, sourceContext.getLatestWatermark().getTimestamp());
}
Example #19
Source File: ArchivedExecutionGraph.java From flink with Apache License 2.0 | 5 votes |
/**
 * Create a {@link ArchivedExecutionGraph} from the given {@link ExecutionGraph}.
 *
 * @param executionGraph to create the ArchivedExecutionGraph from
 * @return ArchivedExecutionGraph created from the given ExecutionGraph
 */
public static ArchivedExecutionGraph createFrom(ExecutionGraph executionGraph) {
    final int numberVertices = executionGraph.getTotalNumberOfVertices();

    Map<JobVertexID, ArchivedExecutionJobVertex> archivedTasks = new HashMap<>(numberVertices);
    List<ArchivedExecutionJobVertex> archivedVerticesInCreationOrder = new ArrayList<>(numberVertices);

    for (ExecutionJobVertex task : executionGraph.getVerticesTopologically()) {
        ArchivedExecutionJobVertex archivedTask = task.archive();
        archivedVerticesInCreationOrder.add(archivedTask);
        archivedTasks.put(task.getJobVertexId(), archivedTask);
    }

    final Map<String, SerializedValue<OptionalFailure<Object>>> serializedUserAccumulators =
        executionGraph.getAccumulatorsSerialized();

    final long[] timestamps = new long[JobStatus.values().length];

    for (JobStatus jobStatus : JobStatus.values()) {
        final int ordinal = jobStatus.ordinal();
        timestamps[ordinal] = executionGraph.getStatusTimestamp(jobStatus);
    }

    return new ArchivedExecutionGraph(
        executionGraph.getJobID(),
        executionGraph.getJobName(),
        archivedTasks,
        archivedVerticesInCreationOrder,
        timestamps,
        executionGraph.getState(),
        executionGraph.getFailureInfo(),
        executionGraph.getJsonPlan(),
        executionGraph.getAccumulatorResultsStringified(),
        serializedUserAccumulators,
        executionGraph.getArchivedExecutionConfig(),
        executionGraph.isStoppable(),
        executionGraph.getCheckpointCoordinatorConfiguration(),
        executionGraph.getCheckpointStatsSnapshot(),
        executionGraph.getStateBackendName().orElse(null));
}
Example #20
Source File: FlinkKafkaConsumerBaseTest.java From flink with Apache License 2.0 | 5 votes |
@Override
protected AbstractFetcher<T, ?> createFetcher(
        SourceContext<T> sourceContext,
        Map<KafkaTopicPartition, Long> thisSubtaskPartitionsWithStartOffsets,
        SerializedValue<WatermarkStrategy<T>> watermarkStrategy,
        StreamingRuntimeContext runtimeContext,
        OffsetCommitMode offsetCommitMode,
        MetricGroup consumerMetricGroup,
        boolean useMetrics) throws Exception {
    return testFetcherSupplier.get();
}
Example #21
Source File: ArchivedExecutionGraphTest.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
private static void compareSerializedAccumulators(
        Map<String, SerializedValue<OptionalFailure<Object>>> runtimeAccs,
        Map<String, SerializedValue<OptionalFailure<Object>>> archivedAccs) throws IOException, ClassNotFoundException {

    assertEquals(runtimeAccs.size(), archivedAccs.size());
    for (Entry<String, SerializedValue<OptionalFailure<Object>>> runtimeAcc : runtimeAccs.entrySet()) {
        long runtimeUserAcc = (long) runtimeAcc.getValue()
            .deserializeValue(ClassLoader.getSystemClassLoader())
            .getUnchecked();
        long archivedUserAcc = (long) archivedAccs.get(runtimeAcc.getKey())
            .deserializeValue(ClassLoader.getSystemClassLoader())
            .getUnchecked();

        assertEquals(runtimeUserAcc, archivedUserAcc);
    }
}
Example #22
Source File: ArchivedExecutionGraph.java From flink with Apache License 2.0 | 5 votes |
public ArchivedExecutionGraph(
        JobID jobID,
        String jobName,
        Map<JobVertexID, ArchivedExecutionJobVertex> tasks,
        List<ArchivedExecutionJobVertex> verticesInCreationOrder,
        long[] stateTimestamps,
        JobStatus state,
        @Nullable ErrorInfo failureCause,
        String jsonPlan,
        StringifiedAccumulatorResult[] archivedUserAccumulators,
        Map<String, SerializedValue<OptionalFailure<Object>>> serializedUserAccumulators,
        ArchivedExecutionConfig executionConfig,
        boolean isStoppable,
        @Nullable CheckpointCoordinatorConfiguration jobCheckpointingConfiguration,
        @Nullable CheckpointStatsSnapshot checkpointStatsSnapshot) {

    this.jobID = Preconditions.checkNotNull(jobID);
    this.jobName = Preconditions.checkNotNull(jobName);
    this.tasks = Preconditions.checkNotNull(tasks);
    this.verticesInCreationOrder = Preconditions.checkNotNull(verticesInCreationOrder);
    this.stateTimestamps = Preconditions.checkNotNull(stateTimestamps);
    this.state = Preconditions.checkNotNull(state);
    this.failureCause = failureCause;
    this.jsonPlan = Preconditions.checkNotNull(jsonPlan);
    this.archivedUserAccumulators = Preconditions.checkNotNull(archivedUserAccumulators);
    this.serializedUserAccumulators = Preconditions.checkNotNull(serializedUserAccumulators);
    this.archivedExecutionConfig = Preconditions.checkNotNull(executionConfig);
    this.isStoppable = isStoppable;
    this.jobCheckpointingConfiguration = jobCheckpointingConfiguration;
    this.checkpointStatsSnapshot = checkpointStatsSnapshot;
}
Example #23
Source File: ArchivedExecutionGraph.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
public ArchivedExecutionGraph(
        JobID jobID,
        String jobName,
        Map<JobVertexID, ArchivedExecutionJobVertex> tasks,
        List<ArchivedExecutionJobVertex> verticesInCreationOrder,
        long[] stateTimestamps,
        JobStatus state,
        @Nullable ErrorInfo failureCause,
        String jsonPlan,
        StringifiedAccumulatorResult[] archivedUserAccumulators,
        Map<String, SerializedValue<OptionalFailure<Object>>> serializedUserAccumulators,
        ArchivedExecutionConfig executionConfig,
        boolean isStoppable,
        @Nullable CheckpointCoordinatorConfiguration jobCheckpointingConfiguration,
        @Nullable CheckpointStatsSnapshot checkpointStatsSnapshot) {

    this.jobID = Preconditions.checkNotNull(jobID);
    this.jobName = Preconditions.checkNotNull(jobName);
    this.tasks = Preconditions.checkNotNull(tasks);
    this.verticesInCreationOrder = Preconditions.checkNotNull(verticesInCreationOrder);
    this.stateTimestamps = Preconditions.checkNotNull(stateTimestamps);
    this.state = Preconditions.checkNotNull(state);
    this.failureCause = failureCause;
    this.jsonPlan = Preconditions.checkNotNull(jsonPlan);
    this.archivedUserAccumulators = Preconditions.checkNotNull(archivedUserAccumulators);
    this.serializedUserAccumulators = Preconditions.checkNotNull(serializedUserAccumulators);
    this.archivedExecutionConfig = Preconditions.checkNotNull(executionConfig);
    this.isStoppable = isStoppable;
    this.jobCheckpointingConfiguration = jobCheckpointingConfiguration;
    this.checkpointStatsSnapshot = checkpointStatsSnapshot;
}
Example #24
Source File: JobMaster.java From flink with Apache License 2.0 | 5 votes |
@Override
public CompletableFuture<Acknowledge> sendOperatorEventToCoordinator(
        final ExecutionAttemptID task,
        final OperatorID operatorID,
        final SerializedValue<OperatorEvent> serializedEvent) {

    try {
        final OperatorEvent evt = serializedEvent.deserializeValue(userCodeLoader);
        schedulerNG.deliverOperatorEventToCoordinator(task, operatorID, evt);
        return CompletableFuture.completedFuture(Acknowledge.get());
    } catch (Exception e) {
        return FutureUtils.completedExceptionally(e);
    }
}
Example #25
Source File: ExecutionGraphDeploymentWithBlobServerTest.java From flink with Apache License 2.0 | 5 votes |
@Override
protected void checkJobOffloaded(ExecutionGraph eg) throws Exception {
    Either<SerializedValue<JobInformation>, PermanentBlobKey> jobInformationOrBlobKey = eg.getJobInformationOrBlobKey();

    assertTrue(jobInformationOrBlobKey.isRight());

    // must not throw:
    blobServer.getFile(eg.getJobID(), jobInformationOrBlobKey.right());
}
Example #26
Source File: KafkaFetcher.java From flink with Apache License 2.0 | 5 votes |
public KafkaFetcher(
        SourceFunction.SourceContext<T> sourceContext,
        Map<KafkaTopicPartition, Long> assignedPartitionsWithInitialOffsets,
        SerializedValue<WatermarkStrategy<T>> watermarkStrategy,
        ProcessingTimeService processingTimeProvider,
        long autoWatermarkInterval,
        ClassLoader userCodeClassLoader,
        String taskNameWithSubtasks,
        KafkaDeserializationSchema<T> deserializer,
        Properties kafkaProperties,
        long pollTimeout,
        MetricGroup subtaskMetricGroup,
        MetricGroup consumerMetricGroup,
        boolean useMetrics) throws Exception {
    super(
            sourceContext,
            assignedPartitionsWithInitialOffsets,
            watermarkStrategy,
            processingTimeProvider,
            autoWatermarkInterval,
            userCodeClassLoader,
            consumerMetricGroup,
            useMetrics);

    this.deserializer = deserializer;
    this.handover = new Handover();

    this.consumerThread = new KafkaConsumerThread(
            LOG,
            handover,
            kafkaProperties,
            unassignedPartitionsQueue,
            getFetcherName() + " for " + taskNameWithSubtasks,
            pollTimeout,
            useMetrics,
            consumerMetricGroup,
            subtaskMetricGroup);
    this.kafkaCollector = new KafkaCollector();
}
Example #27
Source File: OperatorEventDispatcherImpl.java From flink with Apache License 2.0 | 5 votes |
@Override
public void sendEventToCoordinator(OperatorEvent event) {
    final SerializedValue<OperatorEvent> serializedEvent;
    try {
        serializedEvent = new SerializedValue<>(event);
    } catch (IOException e) {
        // this is not a recoverable situation, so we wrap this in an
        // unchecked exception and let it bubble up
        throw new FlinkRuntimeException("Cannot serialize operator event", e);
    }

    toCoordinator.sendOperatorEventToCoordinator(operatorId, serializedEvent);
}
Example #28
Source File: OperatorEventValveTest.java From flink with Apache License 2.0 | 5 votes |
@Test
public void eventsBlockedByClosedValve() throws Exception {
    final TestEventSender sender = new TestEventSender();
    final OperatorEventValve valve = new OperatorEventValve(sender);
    valve.markForCheckpoint(1L);
    valve.shutValve(1L);

    final CompletableFuture<Acknowledge> future = valve.sendEvent(new SerializedValue<>(new TestOperatorEvent()), 1);

    assertTrue(sender.events.isEmpty());
    assertFalse(future.isDone());
}
Example #29
Source File: PulsarFetcher.java From pulsar-flink with Apache License 2.0 | 5 votes |
public PulsarFetcher(
        SourceContext<T> sourceContext,
        Map<String, MessageId> seedTopicsWithInitialOffsets,
        SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic,
        SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated,
        ProcessingTimeService processingTimeProvider,
        long autoWatermarkInterval,
        ClassLoader userCodeClassLoader,
        StreamingRuntimeContext runtimeContext,
        ClientConfigurationData clientConf,
        Map<String, Object> readerConf,
        int pollTimeoutMs,
        DeserializationSchema<T> deserializer,
        PulsarMetadataReader metadataReader) throws Exception {
    this(
        sourceContext,
        seedTopicsWithInitialOffsets,
        watermarksPeriodic,
        watermarksPunctuated,
        processingTimeProvider,
        autoWatermarkInterval,
        userCodeClassLoader,
        runtimeContext,
        clientConf,
        readerConf,
        pollTimeoutMs,
        3, // commit retries before fail
        deserializer,
        metadataReader);
}