Java Code Examples for org.apache.flink.types.Either#right()
The following examples show how to use org.apache.flink.types.Either#right(). Each example is taken from an open source project; the source file and license are noted above the code.
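Before the project examples, here is a minimal, self-contained sketch of the basic pattern the examples rely on: constructing an Either.Right and guarding access to right() with isRight(). It is not taken from any project below, and the class and variable names (EitherRightSketch, payload, error) are illustrative only.

import org.apache.flink.types.Either;

public class EitherRightSketch {

    public static void main(String[] args) {
        // Wrap a value on the right side of the Either.
        Either<String, Integer> payload = Either.Right(42);

        // Guard access with isRight(); calling right() on a Left value fails at runtime.
        if (payload.isRight()) {
            Integer value = payload.right();
            System.out.println("right value = " + value);
        }

        // The mirror case: a Left value, accessed through isLeft()/left().
        Either<String, Integer> error = Either.Left("something went wrong");
        if (error.isLeft()) {
            System.out.println("left value = " + error.left());
        }
    }
}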
Example 1
Source File: RemoteChannelStateChecker.java From flink with Apache License 2.0
public boolean isProducerReadyOrAbortConsumption(ResponseHandle responseHandle) {
    Either<ExecutionState, Throwable> result = responseHandle.getProducerExecutionState();
    ExecutionState consumerExecutionState = responseHandle.getConsumerExecutionState();
    if (!isConsumerStateValidForConsumption(consumerExecutionState)) {
        LOG.debug(
            "Ignore a partition producer state notification for task {}, because it's not running.",
            taskNameWithSubtask);
    } else if (result.isLeft() || result.right() instanceof TimeoutException) {
        boolean isProducerConsumerReady = isProducerConsumerReady(responseHandle);
        if (isProducerConsumerReady) {
            return true;
        } else {
            abortConsumptionOrIgnoreCheckResult(responseHandle);
        }
    } else {
        handleFailedCheckResult(responseHandle);
    }
    return false;
}
Example 2
Source File: EitherSerializer.java From Flink-CEPplus with Apache License 2.0
@Override
public Either<L, R> copy(Either<L, R> from) {
    if (from.isLeft()) {
        L left = from.left();
        L copyLeft = leftSerializer.copy(left);
        return Left(copyLeft);
    } else {
        R right = from.right();
        R copyRight = rightSerializer.copy(right);
        return Right(copyRight);
    }
}
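A minimal sketch of how this copy() path can be exercised, assuming the EitherSerializer is obtained through EitherTypeInfo#createSerializer(ExecutionConfig); the class name EitherCopySketch and the chosen types are illustrative and not part of the example above.

import org.apache.flink.api.common.ExecutionConfig;
import org.apache.flink.api.common.typeinfo.BasicTypeInfo;
import org.apache.flink.api.common.typeutils.TypeSerializer;
import org.apache.flink.api.java.typeutils.EitherTypeInfo;
import org.apache.flink.types.Either;

public class EitherCopySketch {

    public static void main(String[] args) {
        // Build a serializer for Either<String, Long> from its TypeInformation.
        EitherTypeInfo<String, Long> typeInfo =
            new EitherTypeInfo<>(BasicTypeInfo.STRING_TYPE_INFO, BasicTypeInfo.LONG_TYPE_INFO);
        TypeSerializer<Either<String, Long>> serializer = typeInfo.createSerializer(new ExecutionConfig());

        // copy() goes through the right()/Right branch for Right values ...
        Either<String, Long> right = Either.Right(42L);
        System.out.println(serializer.copy(right).right());   // prints 42

        // ... and through the left()/Left branch for Left values.
        Either<String, Long> left = Either.Left("fallback");
        System.out.println(serializer.copy(left).left());     // prints fallback
    }
}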
Example 3
Source File: VertexCentricIteration.java From Flink-CEPplus with Apache License 2.0
public void flatMap(
        Either<Vertex<K, VV>, Tuple2<K, Message>> value,
        Collector<Tuple2<K, Either<NullValue, Message>>> out) {
    if (value.isRight()) {
        Tuple2<K, Message> message = value.right();
        outTuple.f0 = message.f0;
        outTuple.f1 = Either.Right(message.f1);
        out.collect(outTuple);
    }
}
Example 4
Source File: EitherSerializer.java From flink with Apache License 2.0
@Override
public Either<L, R> copy(Either<L, R> from) {
    if (from.isLeft()) {
        L left = from.left();
        L copyLeft = leftSerializer.copy(left);
        return Left(copyLeft);
    } else {
        R right = from.right();
        R copyRight = rightSerializer.copy(right);
        return Right(copyRight);
    }
}
Example 5
Source File: VertexCentricIteration.java From flink with Apache License 2.0
public void flatMap(
        Either<Vertex<K, VV>, Tuple2<K, Message>> value,
        Collector<Tuple2<K, Either<NullValue, Message>>> out) {
    if (value.isRight()) {
        Tuple2<K, Message> message = value.right();
        outTuple.f0 = message.f0;
        outTuple.f1 = Either.Right(message.f1);
        out.collect(outTuple);
    }
}
Example 6
Source File: TaskDeploymentDescriptorFactory.java From flink with Apache License 2.0
private static MaybeOffloaded<JobInformation> getSerializedJobInformation(ExecutionGraph executionGraph) {
    Either<SerializedValue<JobInformation>, PermanentBlobKey> jobInformationOrBlobKey =
        executionGraph.getJobInformationOrBlobKey();
    if (jobInformationOrBlobKey.isLeft()) {
        return new TaskDeploymentDescriptor.NonOffloaded<>(jobInformationOrBlobKey.left());
    } else {
        return new TaskDeploymentDescriptor.Offloaded<>(jobInformationOrBlobKey.right());
    }
}
Example 7
Source File: TaskDeploymentDescriptorFactory.java From flink with Apache License 2.0
private static MaybeOffloaded<TaskInformation> getSerializedTaskInformation(
        Either<SerializedValue<TaskInformation>, PermanentBlobKey> taskInfo) {
    return taskInfo.isLeft()
        ? new TaskDeploymentDescriptor.NonOffloaded<>(taskInfo.left())
        : new TaskDeploymentDescriptor.Offloaded<>(taskInfo.right());
}
Example 8
Source File: ExecutionVertex.java From Flink-CEPplus with Apache License 2.0
/**
 * Creates a task deployment descriptor to deploy a subtask to the given target slot.
 * TODO: This should actually be in the EXECUTION
 */
TaskDeploymentDescriptor createDeploymentDescriptor(
        ExecutionAttemptID executionId,
        LogicalSlot targetSlot,
        @Nullable JobManagerTaskRestore taskRestore,
        int attemptNumber) throws ExecutionGraphException {

    // Produced intermediate results
    List<ResultPartitionDeploymentDescriptor> producedPartitions = new ArrayList<>(resultPartitions.size());

    // Consumed intermediate results
    List<InputGateDeploymentDescriptor> consumedPartitions = new ArrayList<>(inputEdges.length);

    boolean lazyScheduling = getExecutionGraph().getScheduleMode().allowLazyDeployment();

    for (IntermediateResultPartition partition : resultPartitions.values()) {

        List<List<ExecutionEdge>> consumers = partition.getConsumers();

        if (consumers.isEmpty()) {
            //TODO this case only exists for test, currently there has to be exactly one consumer in real jobs!
            producedPartitions.add(ResultPartitionDeploymentDescriptor.from(
                partition,
                KeyGroupRangeAssignment.UPPER_BOUND_MAX_PARALLELISM,
                lazyScheduling));
        } else {
            Preconditions.checkState(1 == consumers.size(),
                "Only one consumer supported in the current implementation! Found: " + consumers.size());

            List<ExecutionEdge> consumer = consumers.get(0);
            ExecutionJobVertex vertex = consumer.get(0).getTarget().getJobVertex();
            int maxParallelism = vertex.getMaxParallelism();
            producedPartitions.add(ResultPartitionDeploymentDescriptor.from(
                partition, maxParallelism, lazyScheduling));
        }
    }

    for (ExecutionEdge[] edges : inputEdges) {
        InputChannelDeploymentDescriptor[] partitions = InputChannelDeploymentDescriptor.fromEdges(
            edges,
            targetSlot.getTaskManagerLocation().getResourceID(),
            lazyScheduling);

        // If the produced partition has multiple consumers registered, we
        // need to request the one matching our sub task index.
        // TODO Refactor after removing the consumers from the intermediate result partitions
        int numConsumerEdges = edges[0].getSource().getConsumers().get(0).size();

        int queueToRequest = subTaskIndex % numConsumerEdges;

        IntermediateResult consumedIntermediateResult = edges[0].getSource().getIntermediateResult();
        final IntermediateDataSetID resultId = consumedIntermediateResult.getId();
        final ResultPartitionType partitionType = consumedIntermediateResult.getResultType();

        consumedPartitions.add(new InputGateDeploymentDescriptor(resultId, partitionType, queueToRequest, partitions));
    }

    final Either<SerializedValue<JobInformation>, PermanentBlobKey> jobInformationOrBlobKey =
        getExecutionGraph().getJobInformationOrBlobKey();

    final TaskDeploymentDescriptor.MaybeOffloaded<JobInformation> serializedJobInformation;

    if (jobInformationOrBlobKey.isLeft()) {
        serializedJobInformation = new TaskDeploymentDescriptor.NonOffloaded<>(jobInformationOrBlobKey.left());
    } else {
        serializedJobInformation = new TaskDeploymentDescriptor.Offloaded<>(jobInformationOrBlobKey.right());
    }

    final Either<SerializedValue<TaskInformation>, PermanentBlobKey> taskInformationOrBlobKey;

    try {
        taskInformationOrBlobKey = jobVertex.getTaskInformationOrBlobKey();
    } catch (IOException e) {
        throw new ExecutionGraphException(
            "Could not create a serialized JobVertexInformation for " + jobVertex.getJobVertexId(), e);
    }

    final TaskDeploymentDescriptor.MaybeOffloaded<TaskInformation> serializedTaskInformation;

    if (taskInformationOrBlobKey.isLeft()) {
        serializedTaskInformation = new TaskDeploymentDescriptor.NonOffloaded<>(taskInformationOrBlobKey.left());
    } else {
        serializedTaskInformation = new TaskDeploymentDescriptor.Offloaded<>(taskInformationOrBlobKey.right());
    }

    return new TaskDeploymentDescriptor(
        getJobId(),
        serializedJobInformation,
        serializedTaskInformation,
        executionId,
        targetSlot.getAllocationId(),
        subTaskIndex,
        attemptNumber,
        targetSlot.getPhysicalSlotNumber(),
        taskRestore,
        producedPartitions,
        consumedPartitions);
}