org.apache.flink.util.FlinkRuntimeException Java Examples
The following examples show how to use
org.apache.flink.util.FlinkRuntimeException.
Each example lists its source file and originating project above the code.
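For orientation before the examples: FlinkRuntimeException extends java.lang.RuntimeException and provides (String), (Throwable), and (String, Throwable) constructors. The recurring pattern below is wrapping a checked exception (typically IOException or RocksDBException) so it can cross an interface that declares no checked exceptions. A minimal, self-contained sketch of that pattern; the parsePort helper and its message are hypothetical, not taken from Flink:

import org.apache.flink.util.FlinkRuntimeException;

public class FlinkRuntimeExceptionSketch {

    public static void main(String[] args) {
        try {
            parsePort("not-a-number");
        } catch (FlinkRuntimeException e) {
            // the original exception is preserved as the cause
            System.out.println(e.getMessage() + " (cause: " + e.getCause() + ")");
        }
    }

    // Hypothetical helper: rethrows a parsing failure as a FlinkRuntimeException,
    // mirroring the wrap-and-rethrow pattern used throughout the examples below.
    private static int parsePort(String raw) {
        try {
            return Integer.parseInt(raw);
        } catch (NumberFormatException e) {
            throw new FlinkRuntimeException("Invalid port value: " + raw, e);
        }
    }
}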
Example #1
Source File: JobMasterTest.java From Flink-CEPplus with Apache License 2.0
@Override
public Collection<SlotOffer> offerSlots(
        TaskManagerLocation taskManagerLocation,
        TaskManagerGateway taskManagerGateway,
        Collection<SlotOffer> offers) {
    hasReceivedSlotOffers.trigger();
    final Collection<SlotInfo> slotInfos = Optional.ofNullable(registeredSlots.get(taskManagerLocation.getResourceID()))
        .orElseThrow(() -> new FlinkRuntimeException("TaskManager not registered."));

    int slotIndex = slotInfos.size();

    for (SlotOffer offer : offers) {
        slotInfos.add(new SimpleSlotContext(
            offer.getAllocationId(),
            taskManagerLocation,
            slotIndex,
            taskManagerGateway));
        slotIndex++;
    }

    return offers;
}
Example #2
Source File: EntropyInjector.java From Flink-CEPplus with Apache License 2.0
/**
 * Removes the entropy marker string from the path, if the given file system is an
 * entropy-injecting file system (implements {@link EntropyInjectingFileSystem}) and
 * the entropy marker key is present. Otherwise, this returns the path as is.
 *
 * @param path The path to filter.
 * @return The path without the marker string.
 */
public static Path removeEntropyMarkerIfPresent(FileSystem fs, Path path) {
    final EntropyInjectingFileSystem efs = getEntropyFs(fs);
    if (efs == null) {
        return path;
    } else {
        try {
            return resolveEntropy(path, efs, false);
        } catch (IOException e) {
            // this should never happen, because the path was valid before and we only remove characters.
            // rethrow to silence the compiler
            throw new FlinkRuntimeException(e.getMessage(), e);
        }
    }
}
Example #3
Source File: RestServerEndpointITCase.java From flink with Apache License 2.0
@Test
public void testEndpointsMustBeUnique() throws Exception {
    final RestServerEndpointConfiguration serverConfig = RestServerEndpointConfiguration.fromConfiguration(config);

    final List<Tuple2<RestHandlerSpecification, ChannelInboundHandler>> handlers = Arrays.asList(
        Tuple2.of(new TestHeaders(), testHandler),
        Tuple2.of(new TestHeaders(), testUploadHandler)
    );

    assertThrows("REST handler registration", FlinkRuntimeException.class, () -> {
        try (TestRestServerEndpoint restServerEndpoint = new TestRestServerEndpoint(serverConfig, handlers)) {
            restServerEndpoint.start();
            return null;
        }
    });
}
Example #4
Source File: NFA.java From flink with Apache License 2.0
private State<T> findFinalStateAfterProceed(
        ConditionContext context,
        State<T> state,
        T event) {
    final Stack<State<T>> statesToCheck = new Stack<>();
    statesToCheck.push(state);
    try {
        while (!statesToCheck.isEmpty()) {
            final State<T> currentState = statesToCheck.pop();
            for (StateTransition<T> transition : currentState.getStateTransitions()) {
                if (transition.getAction() == StateTransitionAction.PROCEED &&
                        checkFilterCondition(context, transition.getCondition(), event)) {
                    if (transition.getTargetState().isFinal()) {
                        return transition.getTargetState();
                    } else {
                        statesToCheck.push(transition.getTargetState());
                    }
                }
            }
        }
    } catch (Exception e) {
        throw new FlinkRuntimeException("Failure happened in filter function.", e);
    }

    return null;
}
Example #5
Source File: SchedulerTestBase.java From flink with Apache License 2.0
public int getNumberOfAvailableSlotsForGroup(SlotSharingGroupId slotSharingGroupId, JobVertexID jobVertexId) {
    final SlotSharingManager multiTaskSlotManager = slotSharingManagersMap.get(slotSharingGroupId);

    if (multiTaskSlotManager != null) {
        int availableSlots = 0;

        for (SlotSharingManager.MultiTaskSlot multiTaskSlot : multiTaskSlotManager.getResolvedRootSlots()) {
            if (!multiTaskSlot.contains(jobVertexId)) {
                availableSlots++;
            }
        }

        return availableSlots;
    } else {
        throw new FlinkRuntimeException("No MultiTaskSlotmanager registered under " + slotSharingGroupId + '.');
    }
}
Example #6
Source File: RocksDBMapState.java From Flink-CEPplus with Apache License 2.0
@Override
public UV getValue() {
    if (deleted) {
        return null;
    } else {
        if (userValue == null) {
            try {
                userValue = deserializeUserValue(dataInputView, rawValueBytes, valueSerializer);
            } catch (IOException e) {
                throw new FlinkRuntimeException("Error while deserializing the user value.", e);
            }
        }
        return userValue;
    }
}
Example #7
Source File: ResultPartitionFactory.java From flink with Apache License 2.0
private static void initializeBoundedBlockingPartitions(
        ResultSubpartition[] subpartitions,
        ResultPartition parent,
        BoundedBlockingSubpartitionType blockingSubpartitionType,
        int networkBufferSize,
        FileChannelManager channelManager) {
    int i = 0;
    try {
        for (i = 0; i < subpartitions.length; i++) {
            final File spillFile = channelManager.createChannel().getPathFile();
            subpartitions[i] = blockingSubpartitionType.create(i, parent, spillFile, networkBufferSize);
        }
    } catch (IOException e) {
        // undo all the work so that a failed constructor does not leave any resources
        // in need of disposal
        releasePartitionsQuietly(subpartitions, i);

        // this is not good, we should not be forced to wrap this in a runtime exception.
        // the fact that the ResultPartition and Task constructor (which calls this) do not tolerate any exceptions
        // is incompatible with eager initialization of resources (RAII).
        throw new FlinkRuntimeException(e);
    }
}
Example #8
Source File: MockKeyedStateBackend.java From Flink-CEPplus with Apache License 2.0
@Override
@SuppressWarnings("unchecked")
@Nonnull
public <N, SV, SEV, S extends State, IS extends S> IS createInternalState(
        @Nonnull TypeSerializer<N> namespaceSerializer,
        @Nonnull StateDescriptor<S, SV> stateDesc,
        @Nonnull StateSnapshotTransformFactory<SEV> snapshotTransformFactory) throws Exception {
    StateFactory stateFactory = STATE_FACTORIES.get(stateDesc.getClass());
    if (stateFactory == null) {
        String message = String.format("State %s is not supported by %s",
            stateDesc.getClass(), TtlStateFactory.class);
        throw new FlinkRuntimeException(message);
    }
    IS state = stateFactory.createInternalState(namespaceSerializer, stateDesc);
    stateSnapshotFilters.put(stateDesc.getName(),
        (StateSnapshotTransformer<Object>) getStateSnapshotTransformer(stateDesc, snapshotTransformFactory));
    ((MockInternalKvState<K, N, SV>) state).values = () -> stateValues
        .computeIfAbsent(stateDesc.getName(), n -> new HashMap<>())
        .computeIfAbsent(getCurrentKey(), k -> new HashMap<>());
    return state;
}
Example #9
Source File: ApplicationDispatcherBootstrapTest.java From flink with Apache License 2.0
@Test
public void testClusterShutdownWhenSubmissionFails() throws Exception {
    // we're "listening" on this to be completed to verify that the cluster
    // is being shut down from the ApplicationDispatcherBootstrap
    final CompletableFuture<ApplicationStatus> externalShutdownFuture = new CompletableFuture<>();

    final TestingDispatcherGateway.Builder dispatcherBuilder = new TestingDispatcherGateway.Builder()
        .setSubmitFunction(jobGraph -> {
            throw new FlinkRuntimeException("Nope!");
        })
        .setClusterShutdownFunction((status) -> {
            externalShutdownFuture.complete(status);
            return CompletableFuture.completedFuture(Acknowledge.get());
        });

    ApplicationDispatcherBootstrap bootstrap = createApplicationDispatcherBootstrap(3);

    final CompletableFuture<Acknowledge> shutdownFuture =
        bootstrap.runApplicationAndShutdownClusterAsync(dispatcherBuilder.build(), scheduledExecutor);

    // wait until the bootstrap "thinks" it's done
    shutdownFuture.get(TIMEOUT_SECONDS, TimeUnit.SECONDS);

    // verify that the dispatcher is actually being shut down
    assertThat(externalShutdownFuture.get(TIMEOUT_SECONDS, TimeUnit.SECONDS), is(ApplicationStatus.FAILED));
}
Example #10
Source File: RocksDBStateDownloader.java From flink with Apache License 2.0
/**
 * Copies all the files from the given stream state handles to the given path, renaming the files w.r.t. their
 * {@link StateHandleID}.
 */
private void downloadDataForAllStateHandles(
        Map<StateHandleID, StreamStateHandle> stateHandleMap,
        Path restoreInstancePath,
        CloseableRegistry closeableRegistry) throws Exception {

    try {
        List<Runnable> runnables = createDownloadRunnables(stateHandleMap, restoreInstancePath, closeableRegistry);
        List<CompletableFuture<Void>> futures = new ArrayList<>(runnables.size());
        for (Runnable runnable : runnables) {
            futures.add(CompletableFuture.runAsync(runnable, executorService));
        }
        FutureUtils.waitForAll(futures).get();
    } catch (ExecutionException e) {
        Throwable throwable = ExceptionUtils.stripExecutionException(e);
        throwable = ExceptionUtils.stripException(throwable, RuntimeException.class);
        if (throwable instanceof IOException) {
            throw (IOException) throwable;
        } else {
            throw new FlinkRuntimeException("Failed to download data for state handles.", e);
        }
    }
}
Example #11
Source File: RocksDbTtlCompactFiltersManager.java From Flink-CEPplus with Apache License 2.0
@Override
public int nextUnexpiredOffset(byte[] bytes, long ttl, long currentTimestamp) {
    input.setBuffer(bytes);
    int lastElementOffset = 0;
    while (input.available() > 0) {
        try {
            long timestamp = nextElementLastAccessTimestamp();
            if (!TtlUtils.expired(timestamp, ttl, currentTimestamp)) {
                break;
            }
            lastElementOffset = input.getPosition();
        } catch (IOException e) {
            throw new FlinkRuntimeException("Failed to deserialize list element for TTL compaction filter", e);
        }
    }
    return lastElementOffset;
}
Example #12
Source File: HadoopRecoverableFsDataOutputStream.java From Flink-CEPplus with Apache License 2.0
private static void ensureTruncateInitialized() throws FlinkRuntimeException {
    if (truncateHandle == null) {
        Method truncateMethod;
        try {
            truncateMethod = FileSystem.class.getMethod("truncate", Path.class, long.class);
        } catch (NoSuchMethodException e) {
            throw new FlinkRuntimeException("Could not find a public truncate method on the Hadoop File System.");
        }

        if (!Modifier.isPublic(truncateMethod.getModifiers())) {
            throw new FlinkRuntimeException("Could not find a public truncate method on the Hadoop File System.");
        }

        truncateHandle = truncateMethod;
    }
}
Example #13
Source File: RocksDBMapState.java From flink with Apache License 2.0
@Override
public UV setValue(UV value) {
    if (deleted) {
        throw new IllegalStateException("The value has already been deleted.");
    }

    UV oldValue = getValue();

    try {
        userValue = value;
        rawValueBytes = serializeValueNullSensitive(value, valueSerializer);

        db.put(columnFamily, writeOptions, rawKeyBytes, rawValueBytes);
    } catch (IOException | RocksDBException e) {
        throw new FlinkRuntimeException("Error while putting data into RocksDB.", e);
    }

    return oldValue;
}
Example #14
Source File: RocksDBListState.java From flink with Apache License 2.0
@Override
public void updateInternal(List<V> values) {
    Preconditions.checkNotNull(values, "List of values to add cannot be null.");

    if (!values.isEmpty()) {
        try {
            backend.db.put(
                columnFamily,
                writeOptions,
                serializeCurrentKeyWithGroupAndNamespace(),
                serializeValueList(values, elementSerializer, DELIMITER));
        } catch (IOException | RocksDBException e) {
            throw new FlinkRuntimeException("Error while updating data to RocksDB", e);
        }
    } else {
        clear();
    }
}
Example #15
Source File: HeapKeyedStateBackend.java From flink with Apache License 2.0
@Override
@Nonnull
public <N, SV, SEV, S extends State, IS extends S> IS createInternalState(
        @Nonnull TypeSerializer<N> namespaceSerializer,
        @Nonnull StateDescriptor<S, SV> stateDesc,
        @Nonnull StateSnapshotTransformFactory<SEV> snapshotTransformFactory) throws Exception {
    StateFactory stateFactory = STATE_FACTORIES.get(stateDesc.getClass());
    if (stateFactory == null) {
        String message = String.format("State %s is not supported by %s",
            stateDesc.getClass(), this.getClass());
        throw new FlinkRuntimeException(message);
    }
    StateTable<K, N, SV> stateTable = tryRegisterStateTable(
        namespaceSerializer, stateDesc, getStateSnapshotTransformFactory(stateDesc, snapshotTransformFactory));
    return stateFactory.createState(stateDesc, stateTable, getKeySerializer());
}
Example #16
Source File: RocksDBKeyedStateBackend.java From Flink-CEPplus with Apache License 2.0
@Override
@Nonnull
public <N, SV, SEV, S extends State, IS extends S> IS createInternalState(
        @Nonnull TypeSerializer<N> namespaceSerializer,
        @Nonnull StateDescriptor<S, SV> stateDesc,
        @Nonnull StateSnapshotTransformFactory<SEV> snapshotTransformFactory) throws Exception {
    StateFactory stateFactory = STATE_FACTORIES.get(stateDesc.getClass());
    if (stateFactory == null) {
        String message = String.format("State %s is not supported by %s",
            stateDesc.getClass(), this.getClass());
        throw new FlinkRuntimeException(message);
    }
    Tuple2<ColumnFamilyHandle, RegisteredKeyValueStateBackendMetaInfo<N, SV>> registerResult = tryRegisterKvStateInformation(
        stateDesc, namespaceSerializer, snapshotTransformFactory);
    return stateFactory.createState(stateDesc, registerResult, RocksDBKeyedStateBackend.this);
}
Example #17
Source File: TaskSubmissionTestEnvironment.java From flink with Apache License 2.0
public TaskSubmissionTestEnvironment build() throws Exception {
    final TestingRpcService testingRpcService = new TestingRpcService();
    final ShuffleEnvironment<?, ?> network = optionalShuffleEnvironment.orElseGet(() -> {
        try {
            return createShuffleEnvironment(resourceID,
                localCommunication,
                configuration,
                testingRpcService,
                mockShuffleEnvironment);
        } catch (Exception e) {
            throw new FlinkRuntimeException("Failed to build TaskSubmissionTestEnvironment", e);
        }
    });
    return new TaskSubmissionTestEnvironment(
        jobId,
        jobMasterId,
        slotSize,
        jobMasterGateway,
        configuration,
        taskManagerActionListeners,
        metricQueryServiceAddress,
        testingRpcService,
        network);
}
Example #18
Source File: InternalTimerServiceImpl.java From Flink-CEPplus with Apache License 2.0
private int countTimersInNamespaceInternal(N namespace, InternalPriorityQueue<TimerHeapInternalTimer<K, N>> queue) {
    int count = 0;
    try (final CloseableIterator<TimerHeapInternalTimer<K, N>> iterator = queue.iterator()) {
        while (iterator.hasNext()) {
            final TimerHeapInternalTimer<K, N> timer = iterator.next();
            if (timer.getNamespace().equals(namespace)) {
                count++;
            }
        }
    } catch (Exception e) {
        throw new FlinkRuntimeException("Exception when closing iterator.", e);
    }
    return count;
}
Example #19
Source File: AbstractRocksDBAppendingState.java From Flink-CEPplus with Apache License 2.0
SV getInternal(byte[] key) {
    try {
        byte[] valueBytes = backend.db.get(columnFamily, key);

        if (valueBytes == null) {
            return null;
        }

        dataInputView.setBuffer(valueBytes);
        return valueSerializer.deserialize(dataInputView);
    } catch (IOException | RocksDBException e) {
        throw new FlinkRuntimeException("Error while retrieving data from RocksDB", e);
    }
}
Example #20
Source File: AbstractRocksDBAppendingState.java From flink with Apache License 2.0
void updateInternal(byte[] key, SV valueToStore) {
    try {
        // write the new value to RocksDB
        backend.db.put(columnFamily, writeOptions, key, getValueBytes(valueToStore));
    } catch (RocksDBException e) {
        throw new FlinkRuntimeException("Error while adding value to RocksDB", e);
    }
}
Example #21
Source File: RocksDBSerializedCompositeKeyBuilder.java From Flink-CEPplus with Apache License 2.0
/**
 * Returns a serialized composite key, from the key and key-group provided in a previous call to
 * {@link #setKeyAndKeyGroup(Object, int)} and the given namespace.
 *
 * @param namespace the namespace to concatenate for the serialized composite key bytes.
 * @param namespaceSerializer the serializer to obtain the serialized form of the namespace.
 * @param <N> the type of the namespace.
 * @return the bytes for the serialized composite key of key-group, key, namespace.
 */
@Nonnull
public <N> byte[] buildCompositeKeyNamespace(@Nonnull N namespace, @Nonnull TypeSerializer<N> namespaceSerializer) {
    try {
        serializeNamespace(namespace, namespaceSerializer);
        final byte[] result = keyOutView.getCopyOfBuffer();
        resetToKey();
        return result;
    } catch (IOException shouldNeverHappen) {
        throw new FlinkRuntimeException(shouldNeverHappen);
    }
}
Example #22
Source File: AbstractRocksDBState.java From flink with Apache License 2.0
@Override
public void clear() {
    try {
        backend.db.delete(columnFamily, writeOptions, serializeCurrentKeyWithGroupAndNamespace());
    } catch (RocksDBException e) {
        throw new FlinkRuntimeException("Error while removing entry from RocksDB", e);
    }
}
Example #23
Source File: TypeInformationTest.java From Flink-CEPplus with Apache License 2.0
@Test
public void testOfGenericClassForFlink() {
    try {
        TypeInformation.of(Tuple3.class);
        fail("should fail with an exception");
    } catch (FlinkRuntimeException e) {
        // check that the error message mentions the TypeHint
        assertNotEquals(-1, e.getMessage().indexOf("TypeHint"));
    }
}
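For context on the failure asserted above: TypeInformation.of(Class) cannot recover the erased generic parameters of a parameterized class such as Tuple3, which is why it throws a FlinkRuntimeException whose message points to TypeHint. A sketch (not part of the original test) of the working TypeHint overload:

import org.apache.flink.api.common.typeinfo.TypeHint;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.tuple.Tuple3;

public class TypeHintSketch {
    public static void main(String[] args) {
        // The anonymous TypeHint subclass captures the full generic signature,
        // so no FlinkRuntimeException is thrown here.
        TypeInformation<Tuple3<String, Integer, Long>> info =
            TypeInformation.of(new TypeHint<Tuple3<String, Integer, Long>>() {});
        System.out.println(info);
    }
}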
Example #24
Source File: NFA.java From flink with Apache License 2.0
private OutgoingEdges<T> createDecisionGraph(
        ConditionContext context,
        ComputationState computationState,
        T event) {
    State<T> state = getState(computationState);
    final OutgoingEdges<T> outgoingEdges = new OutgoingEdges<>(state);

    final Stack<State<T>> states = new Stack<>();
    states.push(state);

    // First create all outgoing edges, so to be able to reason about the Dewey version
    while (!states.isEmpty()) {
        State<T> currentState = states.pop();
        Collection<StateTransition<T>> stateTransitions = currentState.getStateTransitions();

        // check all state transitions for each state
        for (StateTransition<T> stateTransition : stateTransitions) {
            try {
                if (checkFilterCondition(context, stateTransition.getCondition(), event)) {
                    // filter condition is true
                    switch (stateTransition.getAction()) {
                        case PROCEED:
                            // simply advance the computation state, but apply the current event to it
                            // PROCEED is equivalent to an epsilon transition
                            states.push(stateTransition.getTargetState());
                            break;
                        case IGNORE:
                        case TAKE:
                            outgoingEdges.add(stateTransition);
                            break;
                    }
                }
            } catch (Exception e) {
                throw new FlinkRuntimeException("Failure happened in filter function.", e);
            }
        }
    }
    return outgoingEdges;
}
Example #25
Source File: RestServerEndpoint.java From flink with Apache License 2.0
private static void checkAllEndpointsAndHandlersAreUnique(final List<Tuple2<RestHandlerSpecification, ChannelInboundHandler>> handlers) {
    // check for all handlers that
    // 1) the instance is only registered once
    // 2) only 1 handler is registered for each endpoint (defined by (version, method, url))
    // technically the first check is redundant since a duplicate instance also returns the same headers which
    // should fail the second check, but we get a better error message
    final Set<String> uniqueEndpoints = new HashSet<>();
    final Set<ChannelInboundHandler> distinctHandlers = Collections.newSetFromMap(new IdentityHashMap<>());
    for (Tuple2<RestHandlerSpecification, ChannelInboundHandler> handler : handlers) {
        boolean isNewHandler = distinctHandlers.add(handler.f1);
        if (!isNewHandler) {
            throw new FlinkRuntimeException("Duplicate REST handler instance found."
                + " Please ensure each instance is registered only once.");
        }

        final RestHandlerSpecification headers = handler.f0;
        for (RestAPIVersion supportedAPIVersion : headers.getSupportedAPIVersions()) {
            final String parameterizedEndpoint = supportedAPIVersion.toString() + headers.getHttpMethod() + headers.getTargetRestEndpointURL();
            // normalize path parameters; distinct path parameters still clash at runtime
            final String normalizedEndpoint = parameterizedEndpoint.replaceAll(":[\\w-]+", ":param");
            boolean isNewEndpoint = uniqueEndpoints.add(normalizedEndpoint);
            if (!isNewEndpoint) {
                throw new FlinkRuntimeException(
                    String.format(
                        "REST handler registration overlaps with another registration for: version=%s, method=%s, url=%s.",
                        supportedAPIVersion, headers.getHttpMethod(), headers.getTargetRestEndpointURL()));
            }
        }
    }
}
Example #26
Source File: HadoopUtils.java From flink with Apache License 2.0
private static Tuple2<Integer, Integer> getMajorMinorBundledHadoopVersion() {
    String versionString = VersionInfo.getVersion();
    String[] versionParts = versionString.split("\\.");

    if (versionParts.length < 2) {
        throw new FlinkRuntimeException(
            "Cannot determine version of Hadoop, unexpected version string: " + versionString);
    }

    int maj = Integer.parseInt(versionParts[0]);
    int min = Integer.parseInt(versionParts[1]);
    return Tuple2.of(maj, min);
}
Example #27
Source File: RocksDBResource.java From flink with Apache License 2.0
/**
 * Creates and returns a new column family with the given name.
 */
public ColumnFamilyHandle createNewColumnFamily(String name) {
    try {
        final ColumnFamilyHandle columnFamily = rocksDB.createColumnFamily(
            new ColumnFamilyDescriptor(name.getBytes(), columnFamilyOptions));
        columnFamilyHandles.add(columnFamily);
        return columnFamily;
    } catch (Exception ex) {
        throw new FlinkRuntimeException("Could not create column family.", ex);
    }
}
Example #28
Source File: RocksDBListState.java From flink with Apache License 2.0
@Override
@Nullable
public byte[] filterOrTransform(@Nullable byte[] value) {
    if (value == null) {
        return null;
    }
    List<T> result = new ArrayList<>();
    DataInputDeserializer in = new DataInputDeserializer(value);
    T next;
    int prevPosition = 0;
    while ((next = deserializeNextElement(in, elementSerializer)) != null) {
        T transformedElement = elementTransformer.filterOrTransform(next);
        if (transformedElement != null) {
            if (transformStrategy == STOP_ON_FIRST_INCLUDED) {
                return Arrays.copyOfRange(value, prevPosition, value.length);
            } else {
                result.add(transformedElement);
            }
        }
        prevPosition = in.getPosition();
    }
    try {
        return result.isEmpty() ? null : serializeValueList(result, elementSerializer, DELIMITER);
    } catch (IOException e) {
        throw new FlinkRuntimeException("Failed to serialize transformed list", e);
    }
}
Example #29
Source File: HadoopUtils.java From Flink-CEPplus with Apache License 2.0
/**
 * Checks if the Hadoop dependency is at least of the given version.
 */
public static boolean isMinHadoopVersion(int major, int minor) throws FlinkRuntimeException {
    String versionString = VersionInfo.getVersion();
    String[] versionParts = versionString.split("\\.");

    if (versionParts.length < 2) {
        throw new FlinkRuntimeException(
            "Cannot determine version of Hadoop, unexpected version string: " + versionString);
    }

    int maj = Integer.parseInt(versionParts[0]);
    int min = Integer.parseInt(versionParts[1]);

    return maj > major || (maj == major && min >= minor);
}
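A hypothetical call site for the check above (a fragment, not from the Flink sources, assuming the HadoopUtils class from this example is imported): Hadoop's FileSystem#truncate, which Example #12 looks up reflectively, first shipped in Hadoop 2.7, so this method is a natural guard for truncate-based code paths.

// Hypothetical guard: only take a truncate-based recovery path when the
// bundled Hadoop version is at least 2.7, where FileSystem#truncate exists.
static boolean supportsTruncate() {
    return HadoopUtils.isMinHadoopVersion(2, 7);
}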
Example #30
Source File: RocksDBCachingPriorityQueueSet.java From flink with Apache License 2.0
@Nonnull
private E deserializeElement(@Nonnull byte[] bytes) {
    try {
        final int numPrefixBytes = groupPrefixBytes.length;
        inputView.setBuffer(bytes, numPrefixBytes, bytes.length - numPrefixBytes);
        return byteOrderProducingSerializer.deserialize(inputView);
    } catch (IOException e) {
        throw new FlinkRuntimeException("Error while deserializing the element.", e);
    }
}