org.apache.flink.runtime.state.KeyedBackendSerializationProxy Java Examples
The following examples show how to use
org.apache.flink.runtime.state.KeyedBackendSerializationProxy.
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example. You can also check out the related API usage in the sidebar.
Example #1
Source File: RocksDBFullRestoreOperation.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
/** * Restore the KV-state / ColumnFamily meta data for all key-groups referenced by the current state handle. */ private void restoreKVStateMetaData() throws IOException, StateMigrationException { KeyedBackendSerializationProxy<K> serializationProxy = readMetaData(currentStateHandleInView); this.keygroupStreamCompressionDecorator = serializationProxy.isUsingKeyGroupCompression() ? SnappyStreamCompressionDecorator.INSTANCE : UncompressedStreamCompressionDecorator.INSTANCE; List<StateMetaInfoSnapshot> restoredMetaInfos = serializationProxy.getStateMetaInfoSnapshots(); currentStateHandleKVStateColumnFamilies = new ArrayList<>(restoredMetaInfos.size()); for (StateMetaInfoSnapshot restoredMetaInfo : restoredMetaInfos) { RocksDbKvStateInfo registeredStateCFHandle = getOrRegisterStateColumnFamilyHandle(null, restoredMetaInfo); currentStateHandleKVStateColumnFamilies.add(registeredStateCFHandle.columnFamilyHandle); } }
Example #2
Source File: AbstractRocksDBRestoreOperation.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
KeyedBackendSerializationProxy<K> readMetaData(DataInputView dataInputView) throws IOException, StateMigrationException { // isSerializerPresenceRequired flag is set to false, since for the RocksDB state backend, // deserialization of state happens lazily during runtime; we depend on the fact // that the new serializer for states could be compatible, and therefore the restore can continue // without old serializers required to be present. KeyedBackendSerializationProxy<K> serializationProxy = new KeyedBackendSerializationProxy<>(userCodeClassLoader); serializationProxy.read(dataInputView); if (!isKeySerializerCompatibilityChecked) { // check for key serializer compatibility; this also reconfigures the // key serializer to be compatible, if it is required and is possible TypeSerializerSchemaCompatibility<K> keySerializerSchemaCompat = keySerializerProvider.setPreviousSerializerSnapshotForRestoredState(serializationProxy.getKeySerializerSnapshot()); if (keySerializerSchemaCompat.isCompatibleAfterMigration() || keySerializerSchemaCompat.isIncompatible()) { throw new StateMigrationException("The new key serializer must be compatible."); } isKeySerializerCompatibilityChecked = true; } return serializationProxy; }
Example #3
Source File: AbstractRocksDBRestoreOperation.java From flink with Apache License 2.0 | 6 votes |
KeyedBackendSerializationProxy<K> readMetaData(DataInputView dataInputView) throws IOException, StateMigrationException { // isSerializerPresenceRequired flag is set to false, since for the RocksDB state backend, // deserialization of state happens lazily during runtime; we depend on the fact // that the new serializer for states could be compatible, and therefore the restore can continue // without old serializers required to be present. KeyedBackendSerializationProxy<K> serializationProxy = new KeyedBackendSerializationProxy<>(userCodeClassLoader); serializationProxy.read(dataInputView); if (!isKeySerializerCompatibilityChecked) { // check for key serializer compatibility; this also reconfigures the // key serializer to be compatible, if it is required and is possible TypeSerializerSchemaCompatibility<K> keySerializerSchemaCompat = keySerializerProvider.setPreviousSerializerSnapshotForRestoredState(serializationProxy.getKeySerializerSnapshot()); if (keySerializerSchemaCompat.isCompatibleAfterMigration() || keySerializerSchemaCompat.isIncompatible()) { throw new StateMigrationException("The new key serializer must be compatible."); } isKeySerializerCompatibilityChecked = true; } return serializationProxy; }
Example #4
Source File: RocksFullSnapshotStrategy.java From flink with Apache License 2.0 | 6 votes |
private void writeKVStateMetaData( final List<Tuple2<RocksIteratorWrapper, Integer>> kvStateIterators, final ReadOptions readOptions, final DataOutputView outputView) throws IOException { int kvStateId = 0; for (MetaData metaDataEntry : metaData) { RocksIteratorWrapper rocksIteratorWrapper = getRocksIterator( db, metaDataEntry.rocksDbKvStateInfo.columnFamilyHandle, metaDataEntry.stateSnapshotTransformer, readOptions); kvStateIterators.add(Tuple2.of(rocksIteratorWrapper, kvStateId)); ++kvStateId; } KeyedBackendSerializationProxy<K> serializationProxy = new KeyedBackendSerializationProxy<>( // TODO: this code assumes that writing a serializer is threadsafe, we should support to // get a serialized form already at state registration time in the future keySerializer, stateMetaInfoSnapshots, !Objects.equals( UncompressedStreamCompressionDecorator.INSTANCE, keyGroupCompressionDecorator)); serializationProxy.write(outputView); }
Example #5
Source File: RocksDBFullRestoreOperation.java From flink with Apache License 2.0 | 6 votes |
/** * Restore the KV-state / ColumnFamily meta data for all key-groups referenced by the current state handle. */ private void restoreKVStateMetaData() throws IOException, StateMigrationException { KeyedBackendSerializationProxy<K> serializationProxy = readMetaData(currentStateHandleInView); this.keygroupStreamCompressionDecorator = serializationProxy.isUsingKeyGroupCompression() ? SnappyStreamCompressionDecorator.INSTANCE : UncompressedStreamCompressionDecorator.INSTANCE; List<StateMetaInfoSnapshot> restoredMetaInfos = serializationProxy.getStateMetaInfoSnapshots(); currentStateHandleKVStateColumnFamilies = new ArrayList<>(restoredMetaInfos.size()); for (StateMetaInfoSnapshot restoredMetaInfo : restoredMetaInfos) { RocksDbKvStateInfo registeredStateCFHandle = getOrRegisterStateColumnFamilyHandle(null, restoredMetaInfo); currentStateHandleKVStateColumnFamilies.add(registeredStateCFHandle.columnFamilyHandle); } }
Example #6
Source File: RocksDBIncrementalRestoreOperation.java From flink with Apache License 2.0 | 6 votes |
/** * Reads Flink's state meta data file from the state handle. */ private KeyedBackendSerializationProxy<K> readMetaData(StreamStateHandle metaStateHandle) throws Exception { FSDataInputStream inputStream = null; try { inputStream = metaStateHandle.openInputStream(); cancelStreamRegistry.registerCloseable(inputStream); DataInputView in = new DataInputViewStreamWrapper(inputStream); return readMetaData(in); } finally { if (cancelStreamRegistry.unregisterCloseable(inputStream)) { inputStream.close(); } } }
Example #7
Source File: RocksDBIncrementalRestoreOperation.java From flink with Apache License 2.0 | 6 votes |
private void restoreFromLocalState(IncrementalLocalKeyedStateHandle localKeyedStateHandle) throws Exception { KeyedBackendSerializationProxy<K> serializationProxy = readMetaData(localKeyedStateHandle.getMetaDataState()); List<StateMetaInfoSnapshot> stateMetaInfoSnapshots = serializationProxy.getStateMetaInfoSnapshots(); columnFamilyDescriptors = createAndRegisterColumnFamilyDescriptors(stateMetaInfoSnapshots, true); columnFamilyHandles = new ArrayList<>(columnFamilyDescriptors.size() + 1); Path restoreSourcePath = localKeyedStateHandle.getDirectoryStateHandle().getDirectory(); LOG.debug("Restoring keyed backend uid in operator {} from incremental snapshot to {}.", operatorIdentifier, backendUID); if (!instanceRocksDBPath.mkdirs()) { String errMsg = "Could not create RocksDB data directory: " + instanceBasePath.getAbsolutePath(); LOG.error(errMsg); throw new IOException(errMsg); } restoreInstanceDirectoryFromPath(restoreSourcePath, dbPath); openDB(); registerColumnFamilyHandles(stateMetaInfoSnapshots); }
Example #8
Source File: RocksDBIncrementalRestoreOperation.java From flink with Apache License 2.0 | 6 votes |
private void restoreFromLocalState(IncrementalLocalKeyedStateHandle localKeyedStateHandle) throws Exception { KeyedBackendSerializationProxy<K> serializationProxy = readMetaData(localKeyedStateHandle.getMetaDataState()); List<StateMetaInfoSnapshot> stateMetaInfoSnapshots = serializationProxy.getStateMetaInfoSnapshots(); columnFamilyDescriptors = createAndRegisterColumnFamilyDescriptors(stateMetaInfoSnapshots, true); columnFamilyHandles = new ArrayList<>(columnFamilyDescriptors.size() + 1); Path restoreSourcePath = localKeyedStateHandle.getDirectoryStateHandle().getDirectory(); LOG.debug("Restoring keyed backend uid in operator {} from incremental snapshot to {}.", operatorIdentifier, backendUID); if (!instanceRocksDBPath.mkdirs()) { String errMsg = "Could not create RocksDB data directory: " + instanceBasePath.getAbsolutePath(); LOG.error(errMsg); throw new IOException(errMsg); } restoreInstanceDirectoryFromPath(restoreSourcePath, dbPath); openDB(); registerColumnFamilyHandles(stateMetaInfoSnapshots); }
Example #9
Source File: RocksDBFullRestoreOperation.java From flink with Apache License 2.0 | 6 votes |
/** * Restore the KV-state / ColumnFamily meta data for all key-groups referenced by the current state handle. */ private void restoreKVStateMetaData() throws IOException, StateMigrationException { KeyedBackendSerializationProxy<K> serializationProxy = readMetaData(currentStateHandleInView); this.keygroupStreamCompressionDecorator = serializationProxy.isUsingKeyGroupCompression() ? SnappyStreamCompressionDecorator.INSTANCE : UncompressedStreamCompressionDecorator.INSTANCE; List<StateMetaInfoSnapshot> restoredMetaInfos = serializationProxy.getStateMetaInfoSnapshots(); currentStateHandleKVStateColumnFamilies = new ArrayList<>(restoredMetaInfos.size()); for (StateMetaInfoSnapshot restoredMetaInfo : restoredMetaInfos) { RocksDbKvStateInfo registeredStateCFHandle = getOrRegisterStateColumnFamilyHandle(null, restoredMetaInfo); currentStateHandleKVStateColumnFamilies.add(registeredStateCFHandle.columnFamilyHandle); } }
Example #10
Source File: AbstractRocksDBRestoreOperation.java From flink with Apache License 2.0 | 6 votes |
KeyedBackendSerializationProxy<K> readMetaData(DataInputView dataInputView) throws IOException, StateMigrationException { // isSerializerPresenceRequired flag is set to false, since for the RocksDB state backend, // deserialization of state happens lazily during runtime; we depend on the fact // that the new serializer for states could be compatible, and therefore the restore can continue // without old serializers required to be present. KeyedBackendSerializationProxy<K> serializationProxy = new KeyedBackendSerializationProxy<>(userCodeClassLoader); serializationProxy.read(dataInputView); if (!isKeySerializerCompatibilityChecked) { // check for key serializer compatibility; this also reconfigures the // key serializer to be compatible, if it is required and is possible TypeSerializerSchemaCompatibility<K> keySerializerSchemaCompat = keySerializerProvider.setPreviousSerializerSnapshotForRestoredState(serializationProxy.getKeySerializerSnapshot()); if (keySerializerSchemaCompat.isCompatibleAfterMigration() || keySerializerSchemaCompat.isIncompatible()) { throw new StateMigrationException("The new key serializer must be compatible."); } isKeySerializerCompatibilityChecked = true; } return serializationProxy; }
Example #11
Source File: RocksFullSnapshotStrategy.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
private void writeKVStateMetaData( final List<Tuple2<RocksIteratorWrapper, Integer>> kvStateIterators, final ReadOptions readOptions, final DataOutputView outputView) throws IOException { int kvStateId = 0; for (MetaData metaDataEntry : metaData) { RocksIteratorWrapper rocksIteratorWrapper = getRocksIterator( db, metaDataEntry.rocksDbKvStateInfo.columnFamilyHandle, metaDataEntry.stateSnapshotTransformer, readOptions); kvStateIterators.add(Tuple2.of(rocksIteratorWrapper, kvStateId)); ++kvStateId; } KeyedBackendSerializationProxy<K> serializationProxy = new KeyedBackendSerializationProxy<>( // TODO: this code assumes that writing a serializer is threadsafe, we should support to // get a serialized form already at state registration time in the future keySerializer, stateMetaInfoSnapshots, !Objects.equals( UncompressedStreamCompressionDecorator.INSTANCE, keyGroupCompressionDecorator)); serializationProxy.write(outputView); }
Example #12
Source File: RocksDBIncrementalRestoreOperation.java From flink with Apache License 2.0 | 6 votes |
/** * Reads Flink's state meta data file from the state handle. */ private KeyedBackendSerializationProxy<K> readMetaData(StreamStateHandle metaStateHandle) throws Exception { InputStream inputStream = null; try { inputStream = metaStateHandle.openInputStream(); cancelStreamRegistry.registerCloseable(inputStream); DataInputView in = new DataInputViewStreamWrapper(inputStream); return readMetaData(in); } finally { if (cancelStreamRegistry.unregisterCloseable(inputStream)) { inputStream.close(); } } }
Example #13
Source File: RocksDBIncrementalRestoreOperation.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
/** * Reads Flink's state meta data file from the state handle. */ private KeyedBackendSerializationProxy<K> readMetaData(StreamStateHandle metaStateHandle) throws Exception { FSDataInputStream inputStream = null; try { inputStream = metaStateHandle.openInputStream(); cancelStreamRegistry.registerCloseable(inputStream); DataInputView in = new DataInputViewStreamWrapper(inputStream); return readMetaData(in); } finally { if (cancelStreamRegistry.unregisterCloseable(inputStream)) { inputStream.close(); } } }
Example #14
Source File: RocksFullSnapshotStrategy.java From flink with Apache License 2.0 | 6 votes |
private void writeKVStateMetaData( final List<Tuple2<RocksIteratorWrapper, Integer>> kvStateIterators, final ReadOptions readOptions, final DataOutputView outputView) throws IOException { int kvStateId = 0; for (MetaData metaDataEntry : metaData) { RocksIteratorWrapper rocksIteratorWrapper = getRocksIterator( db, metaDataEntry.rocksDbKvStateInfo.columnFamilyHandle, metaDataEntry.stateSnapshotTransformer, readOptions); kvStateIterators.add(Tuple2.of(rocksIteratorWrapper, kvStateId)); ++kvStateId; } KeyedBackendSerializationProxy<K> serializationProxy = new KeyedBackendSerializationProxy<>( // TODO: this code assumes that writing a serializer is threadsafe, we should support to // get a serialized form already at state registration time in the future keySerializer, stateMetaInfoSnapshots, !Objects.equals( UncompressedStreamCompressionDecorator.INSTANCE, keyGroupCompressionDecorator)); serializationProxy.write(outputView); }
Example #15
Source File: RocksDBIncrementalRestoreOperation.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
private void restoreFromLocalState(IncrementalLocalKeyedStateHandle localKeyedStateHandle) throws Exception { KeyedBackendSerializationProxy<K> serializationProxy = readMetaData(localKeyedStateHandle.getMetaDataState()); List<StateMetaInfoSnapshot> stateMetaInfoSnapshots = serializationProxy.getStateMetaInfoSnapshots(); columnFamilyDescriptors = createAndRegisterColumnFamilyDescriptors(stateMetaInfoSnapshots, true); columnFamilyHandles = new ArrayList<>(columnFamilyDescriptors.size() + 1); Path restoreSourcePath = localKeyedStateHandle.getDirectoryStateHandle().getDirectory(); LOG.debug("Restoring keyed backend uid in operator {} from incremental snapshot to {}.", operatorIdentifier, backendUID); if (!instanceRocksDBPath.mkdirs()) { String errMsg = "Could not create RocksDB data directory: " + instanceBasePath.getAbsolutePath(); LOG.error(errMsg); throw new IOException(errMsg); } restoreInstanceDirectoryFromPath(restoreSourcePath, dbPath); openDB(); registerColumnFamilyHandles(stateMetaInfoSnapshots); }
Example #16
Source File: RocksDBIncrementalRestoreOperation.java From flink with Apache License 2.0 | 5 votes |
private RestoredDBInstance restoreDBInstanceFromStateHandle( IncrementalRemoteKeyedStateHandle restoreStateHandle, Path temporaryRestoreInstancePath) throws Exception { try (RocksDBStateDownloader rocksDBStateDownloader = new RocksDBStateDownloader(numberOfTransferringThreads)) { rocksDBStateDownloader.transferAllStateDataToDirectory( restoreStateHandle, temporaryRestoreInstancePath, cancelStreamRegistry); } KeyedBackendSerializationProxy<K> serializationProxy = readMetaData(restoreStateHandle.getMetaStateHandle()); // read meta data List<StateMetaInfoSnapshot> stateMetaInfoSnapshots = serializationProxy.getStateMetaInfoSnapshots(); List<ColumnFamilyDescriptor> columnFamilyDescriptors = createAndRegisterColumnFamilyDescriptors(stateMetaInfoSnapshots, false); List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>(stateMetaInfoSnapshots.size() + 1); RocksDB restoreDb = RocksDBOperationUtils.openDB( temporaryRestoreInstancePath.toString(), columnFamilyDescriptors, columnFamilyHandles, RocksDBOperationUtils.createColumnFamilyOptions(columnFamilyOptionsFactory, "default"), dbOptions); return new RestoredDBInstance(restoreDb, columnFamilyHandles, columnFamilyDescriptors, stateMetaInfoSnapshots); }
Example #17
Source File: OperatorStateWriter.java From bravo with Apache License 2.0 | 5 votes |
private void updateProxy() { if (proxy == null && keySerializer == null) { throw new IllegalStateException( "KeySerializer must be defined when adding state to a previously stateless operator. Use writer.setKeySerializer(...)"); } proxy = new KeyedBackendSerializationProxy<>( getKeySerializer(), new ArrayList<>(metaSnapshots.values()), proxy != null ? proxy.isUsingKeyGroupCompression() : true); }
Example #18
Source File: RocksDBSavepointIterator.java From bravo with Apache License 2.0 | 5 votes |
private boolean openIfNeeded() throws Exception { if (stateHandleInStream == null) { LOGGER.debug("Opening {}", keyGroupsStateHandle.getDelegateStateHandle()); stateHandleInStream = keyGroupsStateHandle.openInputStream(); final KeyedBackendSerializationProxy<?> serializationProxy = StateMetadataUtils .getKeyedBackendSerializationProxy(keyGroupsStateHandle); this.stateIdMapping = StateMetadataUtils.getStateIdMapping(serializationProxy); final StreamCompressionDecorator streamCompressionDecorator = StateMetadataUtils .getCompressionDecorator(serializationProxy); final KeyGroupRangeOffsets rangeOffsets = keyGroupsStateHandle.getGroupRangeOffsets(); LOGGER.debug("{}", rangeOffsets); offsetsIt = new ValidOffsetsIterator(rangeOffsets); hasNext = seekNextOffset(); if (hasNext) { final InputStream compressedInputStream = streamCompressionDecorator .decorateWithCompression(stateHandleInStream); compressedInputView = new DataInputViewStreamWrapper(compressedInputStream); seekNextStateId(false); } } return hasNext; }
Example #19
Source File: OperatorStateReader.java From bravo with Apache License 2.0 | 5 votes |
private TypeSerializer<?> getKeySerializer(KeyedBackendSerializationProxy<?> proxy) { TypeSerializer<?> keySerializer = proxy.getKeySerializerConfigSnapshot().restoreSerializer(); if (keySerializer instanceof TupleSerializerBase) { TupleSerializerBase ts = (TupleSerializerBase) keySerializer; if (ts.getTupleClass().equals(Tuple1.class)) { return ts.getFieldSerializers()[0]; } } return keySerializer; }
Example #20
Source File: OperatorStateReader.java From bravo with Apache License 2.0 | 5 votes |
/** * Read keyed states using the provided reader for further processing * * @return The DataSet containing the deseralized state elements */ public <K, V, O> DataSet<O> readKeyedStates(KeyedStateReader<K, V, O> reader) throws Exception { readKeyedStates(); KeyedBackendSerializationProxy<?> proxy = StateMetadataUtils.getKeyedBackendSerializationProxy(opState) .orElseThrow(() -> new IllegalStateException("Cannot read state of a stateless operator.")); reader.configure(opState.getMaxParallelism(), getKeySerializer(proxy), StateMetadataUtils.getSerializer(proxy, reader.getStateName()) .orElseThrow(() -> new IllegalArgumentException("Cannot find state " + reader.getStateName()))); DataSet<O> parsedState = allKeyedStateRows.flatMap(reader); readStates.add(reader.getStateName()); return parsedState; }
Example #21
Source File: StateMetadataUtils.java From bravo with Apache License 2.0 | 5 votes |
public static KeyedBackendSerializationProxy<?> getKeyedBackendSerializationProxy( StreamStateHandle streamStateHandle) { KeyedBackendSerializationProxy<Integer> serializationProxy = new KeyedBackendSerializationProxy<>( StateMetadataUtils.class.getClassLoader()); try (FSDataInputStream is = streamStateHandle.openInputStream()) { DataInputViewStreamWrapper iw = new DataInputViewStreamWrapper(is); serializationProxy.read(iw); return serializationProxy; } catch (IOException e) { throw new RuntimeException(e); } }
Example #22
Source File: StateMetadataUtils.java From bravo with Apache License 2.0 | 5 votes |
public static Map<Integer, String> getStateIdMapping(KeyedBackendSerializationProxy<?> proxy) { Map<Integer, String> stateIdMapping = new HashMap<>(); int stateId = 0; for (StateMetaInfoSnapshot snapshot : proxy.getStateMetaInfoSnapshots()) { stateIdMapping.put(stateId, snapshot.getName()); stateId++; } return stateIdMapping; }
Example #23
Source File: StateMetadataUtils.java From bravo with Apache License 2.0 | 5 votes |
@SuppressWarnings("unchecked") public static <T> Optional<TypeSerializer<T>> getSerializer(KeyedBackendSerializationProxy<?> proxy, String stateName) { for (StateMetaInfoSnapshot snapshot : proxy.getStateMetaInfoSnapshots()) { if (snapshot.getName().equals(stateName)) { return Optional .of((TypeSerializer<T>) snapshot .getTypeSerializerSnapshot(CommonSerializerKeys.VALUE_SERIALIZER) .restoreSerializer()); } } return Optional.empty(); }
Example #24
Source File: StateMetadataUtils.java From bravo with Apache License 2.0 | 5 votes |
public static Optional<KeyedBackendSerializationProxy<?>> getKeyedBackendSerializationProxy(OperatorState opState) { try { KeyedStateHandle firstHandle = opState.getStates().iterator().next().getManagedKeyedState().iterator() .next(); if (firstHandle instanceof IncrementalKeyedStateHandle) { return Optional.of(getKeyedBackendSerializationProxy( ((IncrementalKeyedStateHandle) firstHandle).getMetaStateHandle())); } else { return Optional.of(getKeyedBackendSerializationProxy((StreamStateHandle) firstHandle)); } } catch (Exception e) { return Optional.empty(); } }
Example #25
Source File: RocksDBIncrementalRestoreOperation.java From flink with Apache License 2.0 | 5 votes |
private RestoredDBInstance restoreDBInstanceFromStateHandle( IncrementalRemoteKeyedStateHandle restoreStateHandle, Path temporaryRestoreInstancePath) throws Exception { try (RocksDBStateDownloader rocksDBStateDownloader = new RocksDBStateDownloader(numberOfTransferringThreads)) { rocksDBStateDownloader.transferAllStateDataToDirectory( restoreStateHandle, temporaryRestoreInstancePath, cancelStreamRegistry); } KeyedBackendSerializationProxy<K> serializationProxy = readMetaData(restoreStateHandle.getMetaStateHandle()); // read meta data List<StateMetaInfoSnapshot> stateMetaInfoSnapshots = serializationProxy.getStateMetaInfoSnapshots(); List<ColumnFamilyDescriptor> columnFamilyDescriptors = createAndRegisterColumnFamilyDescriptors(stateMetaInfoSnapshots, false); List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>(stateMetaInfoSnapshots.size() + 1); RocksDB restoreDb = RocksDBOperationUtils.openDB( temporaryRestoreInstancePath.getPath(), columnFamilyDescriptors, columnFamilyHandles, RocksDBOperationUtils.createColumnFamilyOptions(columnFamilyOptionsFactory, "default"), dbOptions); return new RestoredDBInstance(restoreDb, columnFamilyHandles, columnFamilyDescriptors, stateMetaInfoSnapshots); }
Example #26
Source File: RocksDBIncrementalRestoreOperation.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
private RestoredDBInstance restoreDBInstanceFromStateHandle( IncrementalRemoteKeyedStateHandle restoreStateHandle, Path temporaryRestoreInstancePath) throws Exception { try (RocksDBStateDownloader rocksDBStateDownloader = new RocksDBStateDownloader(numberOfTransferringThreads)) { rocksDBStateDownloader.transferAllStateDataToDirectory( restoreStateHandle, temporaryRestoreInstancePath, cancelStreamRegistry); } KeyedBackendSerializationProxy<K> serializationProxy = readMetaData(restoreStateHandle.getMetaStateHandle()); // read meta data List<StateMetaInfoSnapshot> stateMetaInfoSnapshots = serializationProxy.getStateMetaInfoSnapshots(); List<ColumnFamilyDescriptor> columnFamilyDescriptors = createAndRegisterColumnFamilyDescriptors(stateMetaInfoSnapshots, false); List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>(stateMetaInfoSnapshots.size() + 1); RocksDB restoreDb = RocksDBOperationUtils.openDB( temporaryRestoreInstancePath.getPath(), columnFamilyDescriptors, columnFamilyHandles, RocksDBOperationUtils.createColumnFamilyOptions(columnFamilyOptionsFactory, "default"), dbOptions); return new RestoredDBInstance(restoreDb, columnFamilyHandles, columnFamilyDescriptors, stateMetaInfoSnapshots); }
Example #27
Source File: StateMetadataUtils.java From bravo with Apache License 2.0 | 4 votes |
public static StreamCompressionDecorator getCompressionDecorator(KeyedBackendSerializationProxy<?> proxy) { return proxy.isUsingKeyGroupCompression() ? SnappyStreamCompressionDecorator.INSTANCE : UncompressedStreamCompressionDecorator.INSTANCE; }
Example #28
Source File: HeapRestoreOperation.java From flink with Apache License 2.0 | 4 votes |
/**
 * Restores the heap keyed state backend from the assigned key-group state handles: for
 * each handle, reads the backend meta data, checks key serializer compatibility (once),
 * registers the restored state meta info, and reads the actual key-group data.
 */
@Override
public Void restore() throws Exception {
	// State meta info indexed by the state id used inside the written key-group data.
	final Map<Integer, StateMetaInfoSnapshot> kvStatesById = new HashMap<>();
	registeredKVStates.clear();
	registeredPQStates.clear();

	boolean keySerializerRestored = false;

	for (KeyedStateHandle keyedStateHandle : restoreStateHandles) {

		if (keyedStateHandle == null) {
			continue;
		}

		if (!(keyedStateHandle instanceof KeyGroupsStateHandle)) {
			throw new IllegalStateException("Unexpected state handle type, " +
				"expected: " + KeyGroupsStateHandle.class +
				", but found: " + keyedStateHandle.getClass());
		}

		KeyGroupsStateHandle keyGroupsStateHandle = (KeyGroupsStateHandle) keyedStateHandle;
		FSDataInputStream fsDataInputStream = keyGroupsStateHandle.openInputStream();
		// Register with the cancel registry so an in-flight restore can be interrupted.
		cancelStreamRegistry.registerCloseable(fsDataInputStream);

		try {
			DataInputViewStreamWrapper inView = new DataInputViewStreamWrapper(fsDataInputStream);

			KeyedBackendSerializationProxy<K> serializationProxy =
				new KeyedBackendSerializationProxy<>(userCodeClassLoader);

			serializationProxy.read(inView);

			if (!keySerializerRestored) {
				// check for key serializer compatibility; this also reconfigures the
				// key serializer to be compatible, if it is required and is possible
				TypeSerializerSchemaCompatibility<K> keySerializerSchemaCompat =
					keySerializerProvider.setPreviousSerializerSnapshotForRestoredState(serializationProxy.getKeySerializerSnapshot());
				if (keySerializerSchemaCompat.isCompatibleAfterMigration() || keySerializerSchemaCompat.isIncompatible()) {
					throw new StateMigrationException("The new key serializer must be compatible.");
				}

				keySerializerRestored = true;
			}

			List<StateMetaInfoSnapshot> restoredMetaInfos =
				serializationProxy.getStateMetaInfoSnapshots();

			createOrCheckStateForMetaInfo(restoredMetaInfos, kvStatesById);

			// Read the actual key-group state data at the offsets recorded in the handle.
			readStateHandleStateData(
				fsDataInputStream,
				inView,
				keyGroupsStateHandle.getGroupRangeOffsets(),
				kvStatesById,
				restoredMetaInfos.size(),
				serializationProxy.getReadVersion(),
				serializationProxy.isUsingKeyGroupCompression());
		} finally {
			// Only close if still registered; cancellation may already have closed it.
			if (cancelStreamRegistry.unregisterCloseable(fsDataInputStream)) {
				IOUtils.closeQuietly(fsDataInputStream);
			}
		}
	}

	return null;
}
Example #29
Source File: RocksIncrementalSnapshotStrategy.java From flink with Apache License 2.0 | 4 votes |
@Nonnull private SnapshotResult<StreamStateHandle> materializeMetaData() throws Exception { CheckpointStreamWithResultProvider streamWithResultProvider = localRecoveryConfig.isLocalRecoveryEnabled() ? CheckpointStreamWithResultProvider.createDuplicatingStream( checkpointId, CheckpointedStateScope.EXCLUSIVE, checkpointStreamFactory, localRecoveryConfig.getLocalStateDirectoryProvider()) : CheckpointStreamWithResultProvider.createSimpleStream( CheckpointedStateScope.EXCLUSIVE, checkpointStreamFactory); snapshotCloseableRegistry.registerCloseable(streamWithResultProvider); try { //no need for compression scheme support because sst-files are already compressed KeyedBackendSerializationProxy<K> serializationProxy = new KeyedBackendSerializationProxy<>( keySerializer, stateMetaInfoSnapshots, false); DataOutputView out = new DataOutputViewStreamWrapper(streamWithResultProvider.getCheckpointOutputStream()); serializationProxy.write(out); if (snapshotCloseableRegistry.unregisterCloseable(streamWithResultProvider)) { SnapshotResult<StreamStateHandle> result = streamWithResultProvider.closeAndFinalizeCheckpointStreamResult(); streamWithResultProvider = null; return result; } else { throw new IOException("Stream already closed and cannot return a handle."); } } finally { if (streamWithResultProvider != null) { if (snapshotCloseableRegistry.unregisterCloseable(streamWithResultProvider)) { IOUtils.closeQuietly(streamWithResultProvider); } } } }
Example #30
Source File: HeapRestoreOperation.java From Flink-CEPplus with Apache License 2.0 | 4 votes |
/**
 * Restores the heap keyed state backend from the assigned key-group state handles: for
 * each handle, reads the backend meta data, checks key serializer compatibility (once),
 * registers the restored state meta info, and reads the actual key-group data.
 */
@Override
public Void restore() throws Exception {
	// State meta info indexed by the state id used inside the written key-group data.
	final Map<Integer, StateMetaInfoSnapshot> kvStatesById = new HashMap<>();
	registeredKVStates.clear();
	registeredPQStates.clear();

	boolean keySerializerRestored = false;

	for (KeyedStateHandle keyedStateHandle : restoreStateHandles) {

		if (keyedStateHandle == null) {
			continue;
		}

		if (!(keyedStateHandle instanceof KeyGroupsStateHandle)) {
			throw new IllegalStateException("Unexpected state handle type, " +
				"expected: " + KeyGroupsStateHandle.class +
				", but found: " + keyedStateHandle.getClass());
		}

		KeyGroupsStateHandle keyGroupsStateHandle = (KeyGroupsStateHandle) keyedStateHandle;
		FSDataInputStream fsDataInputStream = keyGroupsStateHandle.openInputStream();
		// Register with the cancel registry so an in-flight restore can be interrupted.
		cancelStreamRegistry.registerCloseable(fsDataInputStream);

		try {
			DataInputViewStreamWrapper inView = new DataInputViewStreamWrapper(fsDataInputStream);

			KeyedBackendSerializationProxy<K> serializationProxy =
				new KeyedBackendSerializationProxy<>(userCodeClassLoader);

			serializationProxy.read(inView);

			if (!keySerializerRestored) {
				// check for key serializer compatibility; this also reconfigures the
				// key serializer to be compatible, if it is required and is possible
				TypeSerializerSchemaCompatibility<K> keySerializerSchemaCompat =
					keySerializerProvider.setPreviousSerializerSnapshotForRestoredState(serializationProxy.getKeySerializerSnapshot());
				if (keySerializerSchemaCompat.isCompatibleAfterMigration() || keySerializerSchemaCompat.isIncompatible()) {
					throw new StateMigrationException("The new key serializer must be compatible.");
				}

				keySerializerRestored = true;
			}

			List<StateMetaInfoSnapshot> restoredMetaInfos =
				serializationProxy.getStateMetaInfoSnapshots();

			createOrCheckStateForMetaInfo(restoredMetaInfos, kvStatesById);

			// Read the actual key-group state data at the offsets recorded in the handle.
			readStateHandleStateData(
				fsDataInputStream,
				inView,
				keyGroupsStateHandle.getGroupRangeOffsets(),
				kvStatesById,
				restoredMetaInfos.size(),
				serializationProxy.getReadVersion(),
				serializationProxy.isUsingKeyGroupCompression());
		} finally {
			// Only close if still registered; cancellation may already have closed it.
			if (cancelStreamRegistry.unregisterCloseable(fsDataInputStream)) {
				IOUtils.closeQuietly(fsDataInputStream);
			}
		}
	}

	return null;
}