org.apache.flink.util.OptionalFailure Java Examples
The following examples show how to use org.apache.flink.util.OptionalFailure.
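All of the examples revolve around the same small API, so a minimal sketch of OptionalFailure itself may help before reading them. The class wraps either a successfully computed value or the Throwable that prevented the computation. The sketch below is illustrative only (the class name and the values are made up); it uses the factory methods and accessors that the examples exercise: of, ofFailure, createFrom, isFailure, getUnchecked, and getFailureCause.

import java.util.Arrays;

import org.apache.flink.util.FlinkRuntimeException;
import org.apache.flink.util.OptionalFailure;

public class OptionalFailureSketch {

	public static void main(String[] args) {
		// Wrap a successfully computed value.
		OptionalFailure<Integer> success = OptionalFailure.of(42);

		// Wrap the cause of a failed computation instead of a value.
		OptionalFailure<Integer> failure =
			OptionalFailure.ofFailure(new FlinkRuntimeException("accumulator could not be computed"));

		// createFrom runs the supplier and captures any exception it throws as a failure.
		OptionalFailure<Integer> computed = OptionalFailure.createFrom(() -> 1 + 1);

		for (OptionalFailure<Integer> result : Arrays.asList(success, failure, computed)) {
			if (result.isFailure()) {
				// getFailureCause() returns the captured Throwable.
				System.out.println("failed: " + result.getFailureCause());
			} else {
				// getUnchecked() returns the value, or rethrows the failure as an unchecked exception.
				System.out.println("value: " + result.getUnchecked());
			}
		}
	}
}

The createFrom call mirrors the wrapUnchecked helper shown in the AccumulatorHelper examples below: exceptions thrown while producing an accumulator result are captured inside the OptionalFailure rather than propagated to the caller.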
Example #1
Source File: RestClusterClientTest.java From Flink-CEPplus with Apache License 2.0

@Override
protected CompletableFuture<AsynchronousOperationResult<AsynchronousOperationInfo>> handleRequest(
		@Nonnull HandlerRequest<EmptyRequestBody, SavepointDisposalStatusMessageParameters> request,
		@Nonnull DispatcherGateway gateway) throws RestHandlerException {
	final TriggerId actualTriggerId = request.getPathParameter(TriggerIdPathParameter.class);

	if (actualTriggerId.equals(triggerId)) {
		final OptionalFailure<AsynchronousOperationInfo> nextResponse = responses.poll();

		if (nextResponse != null) {
			if (nextResponse.isFailure()) {
				throw new RestHandlerException("Failure", HttpResponseStatus.BAD_REQUEST, nextResponse.getFailureCause());
			} else {
				return CompletableFuture.completedFuture(AsynchronousOperationResult.completed(nextResponse.getUnchecked()));
			}
		} else {
			throw new AssertionError();
		}
	} else {
		throw new AssertionError();
	}
}
Example #2
Source File: RestClusterClientTest.java From flink with Apache License 2.0

@Override
protected CompletableFuture<AsynchronousOperationResult<AsynchronousOperationInfo>> handleRequest(
		@Nonnull HandlerRequest<EmptyRequestBody, SavepointDisposalStatusMessageParameters> request,
		@Nonnull DispatcherGateway gateway) throws RestHandlerException {
	final TriggerId actualTriggerId = request.getPathParameter(TriggerIdPathParameter.class);

	if (actualTriggerId.equals(triggerId)) {
		final OptionalFailure<AsynchronousOperationInfo> nextResponse = responses.poll();

		if (nextResponse != null) {
			if (nextResponse.isFailure()) {
				throw new RestHandlerException("Failure", HttpResponseStatus.BAD_REQUEST, nextResponse.getFailureCause());
			} else {
				return CompletableFuture.completedFuture(AsynchronousOperationResult.completed(nextResponse.getUnchecked()));
			}
		} else {
			throw new AssertionError();
		}
	} else {
		throw new AssertionError();
	}
}
Example #3
Source File: AccumulatorHelper.java From flink with Apache License 2.0

/**
 * Merge two collections of accumulators. The second will be merged into the
 * first.
 *
 * @param target
 *            The collection of accumulators that will be updated
 * @param toMerge
 *            The collection of accumulators that will be merged into the
 *            other
 */
public static void mergeInto(Map<String, OptionalFailure<Accumulator<?, ?>>> target, Map<String, Accumulator<?, ?>> toMerge) {
	for (Map.Entry<String, Accumulator<?, ?>> otherEntry : toMerge.entrySet()) {
		OptionalFailure<Accumulator<?, ?>> ownAccumulator = target.get(otherEntry.getKey());
		if (ownAccumulator == null) {
			// Create initial counter (copy!)
			target.put(
				otherEntry.getKey(),
				wrapUnchecked(otherEntry.getKey(), () -> otherEntry.getValue().clone()));
		}
		else if (ownAccumulator.isFailure()) {
			continue;
		}
		else {
			Accumulator<?, ?> accumulator = ownAccumulator.getUnchecked();
			// Both should have the same type
			compareAccumulatorTypes(otherEntry.getKey(),
				accumulator.getClass(), otherEntry.getValue().getClass());
			// Merge target counter with other counter
			target.put(
				otherEntry.getKey(),
				wrapUnchecked(otherEntry.getKey(), () -> mergeSingle(accumulator, otherEntry.getValue().clone())));
		}
	}
}
Example #4
Source File: StringifiedAccumulatorResultTest.java From Flink-CEPplus with Apache License 2.0

@Test
public void stringifyingResultsShouldIncorporateAccumulatorLocalValueDirectly() {
	final String name = "a";
	final int targetValue = 314159;
	final IntCounter acc = new IntCounter();
	acc.add(targetValue);
	final Map<String, OptionalFailure<Accumulator<?, ?>>> accumulatorMap = new HashMap<>();
	accumulatorMap.put(name, OptionalFailure.of(acc));

	final StringifiedAccumulatorResult[] results = StringifiedAccumulatorResult.stringifyAccumulatorResults(accumulatorMap);

	assertEquals(1, results.length);

	final StringifiedAccumulatorResult firstResult = results[0];
	assertEquals(name, firstResult.getName());
	assertEquals("IntCounter", firstResult.getType());
	assertEquals(Integer.toString(targetValue), firstResult.getValue());
}
Example #5
Source File: StringifiedAccumulatorResult.java From flink with Apache License 2.0

private static StringifiedAccumulatorResult stringifyAccumulatorResult(
		String name,
		@Nullable OptionalFailure<Accumulator<?, ?>> accumulator) {
	if (accumulator == null) {
		return new StringifiedAccumulatorResult(name, "null", "null");
	}
	else if (accumulator.isFailure()) {
		return new StringifiedAccumulatorResult(
			name,
			"null",
			ExceptionUtils.stringifyException(accumulator.getFailureCause()));
	}
	else {
		Object localValue;
		String simpleName = "null";
		try {
			simpleName = accumulator.getUnchecked().getClass().getSimpleName();
			localValue = accumulator.getUnchecked().getLocalValue();
		}
		catch (RuntimeException exception) {
			LOG.error("Failed to stringify accumulator [" + name + "]", exception);
			localValue = ExceptionUtils.stringifyException(exception);
		}
		return new StringifiedAccumulatorResult(name, simpleName, localValue != null ? localValue.toString() : "null");
	}
}
Example #6
Source File: StringifiedAccumulatorResultTest.java From Flink-CEPplus with Apache License 2.0

@Test
public void stringifyingResultsShouldReportNullLocalValueAsNonnullValueString() {
	final String name = "a";
	final NullBearingAccumulator acc = new NullBearingAccumulator();
	final Map<String, OptionalFailure<Accumulator<?, ?>>> accumulatorMap = new HashMap<>();
	accumulatorMap.put(name, OptionalFailure.of(acc));

	final StringifiedAccumulatorResult[] results = StringifiedAccumulatorResult.stringifyAccumulatorResults(accumulatorMap);

	assertEquals(1, results.length);

	// Note the use of a String with a content of "null" rather than a null value
	final StringifiedAccumulatorResult firstResult = results[0];
	assertEquals(name, firstResult.getName());
	assertEquals("NullBearingAccumulator", firstResult.getType());
	assertEquals("null", firstResult.getValue());
}
Example #7
Source File: StringifiedAccumulatorResultTest.java From Flink-CEPplus with Apache License 2.0

@Test
public void stringifyingResultsShouldReportNullAccumulatorWithNonnullValueAndTypeString() {
	final String name = "a";
	final Map<String, OptionalFailure<Accumulator<?, ?>>> accumulatorMap = new HashMap<>();
	accumulatorMap.put(name, null);

	final StringifiedAccumulatorResult[] results = StringifiedAccumulatorResult.stringifyAccumulatorResults(accumulatorMap);

	assertEquals(1, results.length);

	// Note the use of String values with content of "null" rather than null values
	final StringifiedAccumulatorResult firstResult = results[0];
	assertEquals(name, firstResult.getName());
	assertEquals("null", firstResult.getType());
	assertEquals("null", firstResult.getValue());
}
Example #8
Source File: StringifiedAccumulatorResultTest.java From Flink-CEPplus with Apache License 2.0

@Test
public void stringifyingFailureResults() {
	final String name = "a";
	final Map<String, OptionalFailure<Accumulator<?, ?>>> accumulatorMap = new HashMap<>();
	accumulatorMap.put(name, OptionalFailure.ofFailure(new FlinkRuntimeException("Test")));

	final StringifiedAccumulatorResult[] results = StringifiedAccumulatorResult.stringifyAccumulatorResults(accumulatorMap);

	assertEquals(1, results.length);

	// Note the use of String values with content of "null" rather than null values
	final StringifiedAccumulatorResult firstResult = results[0];
	assertEquals(name, firstResult.getName());
	assertEquals("null", firstResult.getType());
	assertTrue(firstResult.getValue().startsWith("org.apache.flink.util.FlinkRuntimeException: Test"));
}
Example #9
Source File: AccumulatorHelper.java From flink with Apache License 2.0

/**
 * Takes the serialized accumulator results and tries to deserialize them using the provided
 * class loader, and then tries to unwrap the value unchecked.
 *
 * @param serializedAccumulators The serialized accumulator results.
 * @param loader The class loader to use.
 * @return The deserialized and unwrapped accumulator results.
 */
public static Map<String, Object> deserializeAndUnwrapAccumulators(
		Map<String, SerializedValue<OptionalFailure<Object>>> serializedAccumulators,
		ClassLoader loader) throws IOException, ClassNotFoundException {

	Map<String, OptionalFailure<Object>> deserializedAccumulators = deserializeAccumulators(serializedAccumulators, loader);

	if (deserializedAccumulators.isEmpty()) {
		return Collections.emptyMap();
	}

	Map<String, Object> accumulators = new HashMap<>(serializedAccumulators.size());

	for (Map.Entry<String, OptionalFailure<Object>> entry : deserializedAccumulators.entrySet()) {
		accumulators.put(entry.getKey(), entry.getValue().getUnchecked());
	}

	return accumulators;
}
Example #10
Source File: JobResultDeserializer.java From Flink-CEPplus with Apache License 2.0

@SuppressWarnings("unchecked")
private Map<String, SerializedValue<OptionalFailure<Object>>> parseAccumulatorResults(
		final JsonParser p,
		final DeserializationContext ctxt) throws IOException {

	final Map<String, SerializedValue<OptionalFailure<Object>>> accumulatorResults = new HashMap<>();
	while (true) {
		final JsonToken jsonToken = p.nextToken();
		assertNotEndOfInput(p, jsonToken);
		if (jsonToken == JsonToken.END_OBJECT) {
			break;
		}
		final String accumulatorName = p.getValueAsString();
		p.nextValue();
		accumulatorResults.put(
			accumulatorName,
			(SerializedValue<OptionalFailure<Object>>) serializedValueDeserializer.deserialize(p, ctxt));
	}
	return accumulatorResults;
}
Example #11
Source File: JobResultDeserializer.java From flink with Apache License 2.0

@SuppressWarnings("unchecked")
private Map<String, SerializedValue<OptionalFailure<Object>>> parseAccumulatorResults(
		final JsonParser p,
		final DeserializationContext ctxt) throws IOException {

	final Map<String, SerializedValue<OptionalFailure<Object>>> accumulatorResults = new HashMap<>();
	while (true) {
		final JsonToken jsonToken = p.nextToken();
		assertNotEndOfInput(p, jsonToken);
		if (jsonToken == JsonToken.END_OBJECT) {
			break;
		}
		final String accumulatorName = p.getValueAsString();
		p.nextValue();
		accumulatorResults.put(
			accumulatorName,
			(SerializedValue<OptionalFailure<Object>>) serializedValueDeserializer.deserialize(p, ctxt));
	}
	return accumulatorResults;
}
Example #12
Source File: AccumulatorHelper.java From flink with Apache License 2.0

/**
 * Takes the serialized accumulator results and tries to deserialize them using the provided
 * class loader.
 *
 * @param serializedAccumulators The serialized accumulator results.
 * @param loader The class loader to use.
 * @return The deserialized accumulator results.
 * @throws IOException
 * @throws ClassNotFoundException
 */
public static Map<String, OptionalFailure<Object>> deserializeAccumulators(
		Map<String, SerializedValue<OptionalFailure<Object>>> serializedAccumulators,
		ClassLoader loader) throws IOException, ClassNotFoundException {

	if (serializedAccumulators == null || serializedAccumulators.isEmpty()) {
		return Collections.emptyMap();
	}

	Map<String, OptionalFailure<Object>> accumulators = new HashMap<>(serializedAccumulators.size());

	for (Map.Entry<String, SerializedValue<OptionalFailure<Object>>> entry : serializedAccumulators.entrySet()) {

		OptionalFailure<Object> value = null;
		if (entry.getValue() != null) {
			value = entry.getValue().deserializeValue(loader);
		}

		accumulators.put(entry.getKey(), value);
	}

	return accumulators;
}
Example #13
Source File: RestClusterClientTest.java From flink with Apache License 2.0

@Test
public void testGetAccumulators() throws Exception {
	TestAccumulatorHandler accumulatorHandler = new TestAccumulatorHandler();

	try (TestRestServerEndpoint restServerEndpoint = createRestServerEndpoint(accumulatorHandler)) {
		RestClusterClient<?> restClusterClient = createRestClusterClient(restServerEndpoint.getServerAddress().getPort());

		try {
			JobID id = new JobID();

			{
				Map<String, OptionalFailure<Object>> accumulators = restClusterClient.getAccumulators(id);
				assertNotNull(accumulators);
				assertEquals(1, accumulators.size());

				assertEquals(true, accumulators.containsKey("testKey"));
				assertEquals("testValue", accumulators.get("testKey").get().toString());
			}
		} finally {
			restClusterClient.shutdown();
		}
	}
}
Example #14
Source File: StringifiedAccumulatorResultTest.java From flink with Apache License 2.0

@Test
public void stringifyingResultsShouldReportNullAccumulatorWithNonnullValueAndTypeString() {
	final String name = "a";
	final Map<String, OptionalFailure<Accumulator<?, ?>>> accumulatorMap = new HashMap<>();
	accumulatorMap.put(name, null);

	final StringifiedAccumulatorResult[] results = StringifiedAccumulatorResult.stringifyAccumulatorResults(accumulatorMap);

	assertEquals(1, results.length);

	// Note the use of String values with content of "null" rather than null values
	final StringifiedAccumulatorResult firstResult = results[0];
	assertEquals(name, firstResult.getName());
	assertEquals("null", firstResult.getType());
	assertEquals("null", firstResult.getValue());
}
Example #15
Source File: StringifiedAccumulatorResult.java From Flink-CEPplus with Apache License 2.0

private static StringifiedAccumulatorResult stringifyAccumulatorResult(
		String name,
		@Nullable OptionalFailure<Accumulator<?, ?>> accumulator) {
	if (accumulator == null) {
		return new StringifiedAccumulatorResult(name, "null", "null");
	}
	else if (accumulator.isFailure()) {
		return new StringifiedAccumulatorResult(
			name,
			"null",
			ExceptionUtils.stringifyException(accumulator.getFailureCause()));
	}
	else {
		Object localValue;
		String simpleName = "null";
		try {
			simpleName = accumulator.getUnchecked().getClass().getSimpleName();
			localValue = accumulator.getUnchecked().getLocalValue();
		}
		catch (RuntimeException exception) {
			LOG.error("Failed to stringify accumulator [" + name + "]", exception);
			localValue = ExceptionUtils.stringifyException(exception);
		}
		return new StringifiedAccumulatorResult(name, simpleName, localValue != null ? localValue.toString() : "null");
	}
}
Example #16
Source File: StringifiedAccumulatorResultTest.java From flink with Apache License 2.0

@Test
public void stringifyingResultsShouldIncorporateAccumulatorLocalValueDirectly() {
	final String name = "a";
	final int targetValue = 314159;
	final IntCounter acc = new IntCounter();
	acc.add(targetValue);
	final Map<String, OptionalFailure<Accumulator<?, ?>>> accumulatorMap = new HashMap<>();
	accumulatorMap.put(name, OptionalFailure.of(acc));

	final StringifiedAccumulatorResult[] results = StringifiedAccumulatorResult.stringifyAccumulatorResults(accumulatorMap);

	assertEquals(1, results.length);

	final StringifiedAccumulatorResult firstResult = results[0];
	assertEquals(name, firstResult.getName());
	assertEquals("IntCounter", firstResult.getType());
	assertEquals(Integer.toString(targetValue), firstResult.getValue());
}
Example #17
Source File: RestClusterClientTest.java From Flink-CEPplus with Apache License 2.0

@Test
public void testGetAccumulators() throws Exception {
	TestAccumulatorHandler accumulatorHandler = new TestAccumulatorHandler();

	try (TestRestServerEndpoint restServerEndpoint = createRestServerEndpoint(accumulatorHandler)) {
		RestClusterClient<?> restClusterClient = createRestClusterClient(restServerEndpoint.getServerAddress().getPort());

		try {
			JobID id = new JobID();

			{
				Map<String, OptionalFailure<Object>> accumulators = restClusterClient.getAccumulators(id);
				assertNotNull(accumulators);
				assertEquals(1, accumulators.size());

				assertEquals(true, accumulators.containsKey("testKey"));
				assertEquals("testValue", accumulators.get("testKey").get().toString());
			}
		} finally {
			restClusterClient.shutdown();
		}
	}
}
Example #18
Source File: AccumulatorHelper.java From Flink-CEPplus with Apache License 2.0

/**
 * Takes the serialized accumulator results and tries to deserialize them using the provided
 * class loader.
 *
 * @param serializedAccumulators The serialized accumulator results.
 * @param loader The class loader to use.
 * @return The deserialized accumulator results.
 * @throws IOException
 * @throws ClassNotFoundException
 */
public static Map<String, OptionalFailure<Object>> deserializeAccumulators(
		Map<String, SerializedValue<OptionalFailure<Object>>> serializedAccumulators,
		ClassLoader loader) throws IOException, ClassNotFoundException {

	if (serializedAccumulators == null || serializedAccumulators.isEmpty()) {
		return Collections.emptyMap();
	}

	Map<String, OptionalFailure<Object>> accumulators = new HashMap<>(serializedAccumulators.size());

	for (Map.Entry<String, SerializedValue<OptionalFailure<Object>>> entry : serializedAccumulators.entrySet()) {

		OptionalFailure<Object> value = null;
		if (entry.getValue() != null) {
			value = entry.getValue().deserializeValue(loader);
		}

		accumulators.put(entry.getKey(), value);
	}

	return accumulators;
}
Example #19
Source File: StringifiedAccumulatorResultTest.java From flink with Apache License 2.0

@Test
public void stringifyingFailureResults() {
	final String name = "a";
	final Map<String, OptionalFailure<Accumulator<?, ?>>> accumulatorMap = new HashMap<>();
	accumulatorMap.put(name, OptionalFailure.ofFailure(new FlinkRuntimeException("Test")));

	final StringifiedAccumulatorResult[] results = StringifiedAccumulatorResult.stringifyAccumulatorResults(accumulatorMap);

	assertEquals(1, results.length);

	// Note the use of String values with content of "null" rather than null values
	final StringifiedAccumulatorResult firstResult = results[0];
	assertEquals(name, firstResult.getName());
	assertEquals("null", firstResult.getType());
	assertTrue(firstResult.getValue().startsWith("org.apache.flink.util.FlinkRuntimeException: Test"));
}
Example #20
Source File: AccumulatorHelper.java From Flink-CEPplus with Apache License 2.0

/**
 * Merge two collections of accumulators. The second will be merged into the
 * first.
 *
 * @param target
 *            The collection of accumulators that will be updated
 * @param toMerge
 *            The collection of accumulators that will be merged into the
 *            other
 */
public static void mergeInto(Map<String, OptionalFailure<Accumulator<?, ?>>> target, Map<String, Accumulator<?, ?>> toMerge) {
	for (Map.Entry<String, Accumulator<?, ?>> otherEntry : toMerge.entrySet()) {
		OptionalFailure<Accumulator<?, ?>> ownAccumulator = target.get(otherEntry.getKey());
		if (ownAccumulator == null) {
			// Create initial counter (copy!)
			target.put(
				otherEntry.getKey(),
				wrapUnchecked(otherEntry.getKey(), () -> otherEntry.getValue().clone()));
		}
		else if (ownAccumulator.isFailure()) {
			continue;
		}
		else {
			Accumulator<?, ?> accumulator = ownAccumulator.getUnchecked();
			// Both should have the same type
			compareAccumulatorTypes(otherEntry.getKey(),
				accumulator.getClass(), otherEntry.getValue().getClass());
			// Merge target counter with other counter
			target.put(
				otherEntry.getKey(),
				wrapUnchecked(otherEntry.getKey(), () -> mergeSingle(accumulator, otherEntry.getValue().clone())));
		}
	}
}
Example #21
Source File: ArchivedExecutionGraph.java From flink with Apache License 2.0

/**
 * Create a {@link ArchivedExecutionGraph} from the given {@link ExecutionGraph}.
 *
 * @param executionGraph to create the ArchivedExecutionGraph from
 * @return ArchivedExecutionGraph created from the given ExecutionGraph
 */
public static ArchivedExecutionGraph createFrom(ExecutionGraph executionGraph) {
	final int numberVertices = executionGraph.getTotalNumberOfVertices();

	Map<JobVertexID, ArchivedExecutionJobVertex> archivedTasks = new HashMap<>(numberVertices);
	List<ArchivedExecutionJobVertex> archivedVerticesInCreationOrder = new ArrayList<>(numberVertices);

	for (ExecutionJobVertex task : executionGraph.getVerticesTopologically()) {
		ArchivedExecutionJobVertex archivedTask = task.archive();
		archivedVerticesInCreationOrder.add(archivedTask);
		archivedTasks.put(task.getJobVertexId(), archivedTask);
	}

	final Map<String, SerializedValue<OptionalFailure<Object>>> serializedUserAccumulators =
		executionGraph.getAccumulatorsSerialized();

	final long[] timestamps = new long[JobStatus.values().length];

	for (JobStatus jobStatus : JobStatus.values()) {
		final int ordinal = jobStatus.ordinal();
		timestamps[ordinal] = executionGraph.getStatusTimestamp(jobStatus);
	}

	return new ArchivedExecutionGraph(
		executionGraph.getJobID(),
		executionGraph.getJobName(),
		archivedTasks,
		archivedVerticesInCreationOrder,
		timestamps,
		executionGraph.getState(),
		executionGraph.getFailureInfo(),
		executionGraph.getJsonPlan(),
		executionGraph.getAccumulatorResultsStringified(),
		serializedUserAccumulators,
		executionGraph.getArchivedExecutionConfig(),
		executionGraph.isStoppable(),
		executionGraph.getCheckpointCoordinatorConfiguration(),
		executionGraph.getCheckpointStatsSnapshot());
}
Example #22
Source File: CollectResultIteratorTest.java From flink with Apache License 2.0

private Tuple2<CollectResultIterator<Integer>, JobClient> createIteratorAndJobClient(
		List<Integer> expected,
		TypeSerializer<Integer> serializer) {
	CollectResultIterator<Integer> iterator = new CollectResultIterator<>(
		CompletableFuture.completedFuture(TEST_OPERATOR_ID),
		serializer,
		ACCUMULATOR_NAME,
		0);

	TestCoordinationRequestHandler<Integer> handler = new TestCoordinationRequestHandler<>(
		expected, serializer, ACCUMULATOR_NAME);

	TestJobClient.JobInfoProvider infoProvider = new TestJobClient.JobInfoProvider() {

		@Override
		public boolean isJobFinished() {
			return handler.isClosed();
		}

		@Override
		public Map<String, OptionalFailure<Object>> getAccumulatorResults() {
			return handler.getAccumulatorResults();
		}
	};

	TestJobClient jobClient = new TestJobClient(
		TEST_JOB_ID,
		TEST_OPERATOR_ID,
		handler,
		infoProvider);

	iterator.setJobClient(jobClient);

	return Tuple2.of(iterator, jobClient);
}
Example #23
Source File: JobAccumulatorsInfo.java From flink with Apache License 2.0

@JsonCreator
public JobAccumulatorsInfo(
		@JsonProperty(FIELD_NAME_JOB_ACCUMULATORS) List<JobAccumulator> jobAccumulators,
		@JsonProperty(FIELD_NAME_USER_TASK_ACCUMULATORS) List<UserTaskAccumulator> userAccumulators,
		@JsonDeserialize(contentUsing = SerializedValueDeserializer.class)
		@JsonProperty(FIELD_NAME_SERIALIZED_USER_TASK_ACCUMULATORS) Map<String, SerializedValue<OptionalFailure<Object>>> serializedUserAccumulators) {
	this.jobAccumulators = Preconditions.checkNotNull(jobAccumulators);
	this.userAccumulators = Preconditions.checkNotNull(userAccumulators);
	this.serializedUserAccumulators = Preconditions.checkNotNull(serializedUserAccumulators);
}
Example #24
Source File: JobExecutionResult.java From flink with Apache License 2.0

/**
 * Creates a new JobExecutionResult.
 *
 * @param jobID The job's ID.
 * @param netRuntime The net runtime of the job (excluding pre-flight phase like the optimizer) in milliseconds
 * @param accumulators A map of all accumulators produced by the job.
 */
public JobExecutionResult(JobID jobID, long netRuntime, Map<String, OptionalFailure<Object>> accumulators) {
	super(jobID);
	this.netRuntime = netRuntime;

	if (accumulators != null) {
		this.accumulatorResults = accumulators;
	} else {
		this.accumulatorResults = Collections.emptyMap();
	}
}
Example #25
Source File: AccumulatorHelper.java From flink with Apache License 2.0

/**
 * Transform the Map with accumulators into a Map containing only the
 * results.
 */
public static Map<String, OptionalFailure<Object>> toResultMap(Map<String, Accumulator<?, ?>> accumulators) {
	Map<String, OptionalFailure<Object>> resultMap = new HashMap<>();
	for (Map.Entry<String, Accumulator<?, ?>> entry : accumulators.entrySet()) {
		resultMap.put(entry.getKey(), wrapUnchecked(entry.getKey(), () -> entry.getValue().getLocalValue()));
	}
	return resultMap;
}
Example #26
Source File: AccumulatorHelper.java From flink with Apache License 2.0

private static <R> OptionalFailure<R> wrapUnchecked(String name, Supplier<R> supplier) {
	return OptionalFailure.createFrom(() -> {
		try {
			return supplier.get();
		} catch (RuntimeException ex) {
			LOG.error("Unexpected error while handling accumulator [" + name + "]", ex);
			throw new FlinkException(ex);
		}
	});
}