org.apache.flink.runtime.state.IncrementalRemoteKeyedStateHandle Java Examples
The following examples show how to use org.apache.flink.runtime.state.IncrementalRemoteKeyedStateHandle. Each example is taken from an open-source project; the originating source file and license are noted above each snippet.
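Before the examples, here is a minimal, self-contained sketch of constructing an IncrementalRemoteKeyedStateHandle directly. It mirrors the constructor argument order used in the test utilities below (backend identifier, key-group range, checkpoint id, shared state, private state, meta state handle); the file names and byte contents are purely illustrative, and in practice such handles are produced by the incremental RocksDB snapshot rather than built by hand.

import java.util.HashMap;
import java.util.Map;
import java.util.UUID;

import org.apache.flink.runtime.state.IncrementalRemoteKeyedStateHandle;
import org.apache.flink.runtime.state.KeyGroupRange;
import org.apache.flink.runtime.state.StateHandleID;
import org.apache.flink.runtime.state.StreamStateHandle;
import org.apache.flink.runtime.state.memory.ByteStreamStateHandle;

public class IncrementalRemoteKeyedStateHandleSketch {

    public static void main(String[] args) {
        // Shared state: typically the sst files of an incremental checkpoint (illustrative names).
        Map<StateHandleID, StreamStateHandle> sharedState = new HashMap<>();
        sharedState.put(new StateHandleID("001.sst"), new ByteStreamStateHandle("001.sst", new byte[]{'s'}));

        // Private state: files owned exclusively by this checkpoint (illustrative names).
        Map<StateHandleID, StreamStateHandle> privateState = new HashMap<>();
        privateState.put(new StateHandleID("MANIFEST"), new ByteStreamStateHandle("MANIFEST", new byte[]{'p'}));

        // Meta state handle: the serialized keyed-backend metadata.
        StreamStateHandle metaStateHandle = new ByteStreamStateHandle("meta", new byte[]{'m'});

        IncrementalRemoteKeyedStateHandle handle = new IncrementalRemoteKeyedStateHandle(
            UUID.randomUUID(),          // backend identifier
            KeyGroupRange.of(0, 127),   // key groups covered by this handle
            1L,                         // checkpoint id
            sharedState,
            privateState,
            metaStateHandle);

        // The getters used throughout the examples below.
        System.out.println(handle.getCheckpointId());
        System.out.println(handle.getKeyGroupRange());
        System.out.println(handle.getSharedState().keySet());
    }
}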
Example #1
Source File: RocksDBIncrementalRestoreOperation.java From Flink-CEPplus with Apache License 2.0

/**
 * Recovery from a single remote incremental state without rescaling.
 */
private void restoreWithoutRescaling(KeyedStateHandle keyedStateHandle) throws Exception {
    if (keyedStateHandle instanceof IncrementalRemoteKeyedStateHandle) {
        IncrementalRemoteKeyedStateHandle incrementalRemoteKeyedStateHandle =
            (IncrementalRemoteKeyedStateHandle) keyedStateHandle;
        restorePreviousIncrementalFilesStatus(incrementalRemoteKeyedStateHandle);
        restoreFromRemoteState(incrementalRemoteKeyedStateHandle);
    } else if (keyedStateHandle instanceof IncrementalLocalKeyedStateHandle) {
        IncrementalLocalKeyedStateHandle incrementalLocalKeyedStateHandle =
            (IncrementalLocalKeyedStateHandle) keyedStateHandle;
        restorePreviousIncrementalFilesStatus(incrementalLocalKeyedStateHandle);
        restoreFromLocalState(incrementalLocalKeyedStateHandle);
    } else {
        throw new BackendBuildingException("Unexpected state handle type, " +
            "expected " + IncrementalRemoteKeyedStateHandle.class +
            " or " + IncrementalLocalKeyedStateHandle.class +
            ", but found " + keyedStateHandle.getClass());
    }
}
Example #2
Source File: RocksDBIncrementalRestoreOperation.java From flink with Apache License 2.0

private IncrementalLocalKeyedStateHandle transferRemoteStateToLocalDirectory(
    Path temporaryRestoreInstancePath,
    IncrementalRemoteKeyedStateHandle restoreStateHandle) throws Exception {

    try (RocksDBStateDownloader rocksDBStateDownloader = new RocksDBStateDownloader(numberOfTransferringThreads)) {
        rocksDBStateDownloader.transferAllStateDataToDirectory(
            restoreStateHandle,
            temporaryRestoreInstancePath,
            cancelStreamRegistry);
    }

    // since we transferred all remote state to a local directory, we can use the same code as for
    // local recovery.
    return new IncrementalLocalKeyedStateHandle(
        restoreStateHandle.getBackendIdentifier(),
        restoreStateHandle.getCheckpointId(),
        new DirectoryStateHandle(temporaryRestoreInstancePath),
        restoreStateHandle.getKeyGroupRange(),
        restoreStateHandle.getMetaStateHandle(),
        restoreStateHandle.getSharedState().keySet());
}
Example #3
Source File: RocksDBIncrementalRestoreOperation.java From flink with Apache License 2.0

private void initDBWithRescaling(KeyedStateHandle initialHandle) throws Exception {

    assert (initialHandle instanceof IncrementalRemoteKeyedStateHandle);

    // 1. Restore base DB from selected initial handle
    restoreFromRemoteState((IncrementalRemoteKeyedStateHandle) initialHandle);

    // 2. Clip the base DB instance
    try {
        RocksDBIncrementalCheckpointUtils.clipDBWithKeyGroupRange(
            db,
            columnFamilyHandles,
            keyGroupRange,
            initialHandle.getKeyGroupRange(),
            keyGroupPrefixBytes);
    } catch (RocksDBException e) {
        String errMsg = "Failed to clip DB after initialization.";
        LOG.error(errMsg, e);
        throw new BackendBuildingException(errMsg, e);
    }
}
Example #4
Source File: RocksDBIncrementalRestoreOperation.java From flink with Apache License 2.0

/**
 * Recovery from a single remote incremental state without rescaling.
 */
private void restoreWithoutRescaling(KeyedStateHandle keyedStateHandle) throws Exception {
    if (keyedStateHandle instanceof IncrementalRemoteKeyedStateHandle) {
        IncrementalRemoteKeyedStateHandle incrementalRemoteKeyedStateHandle =
            (IncrementalRemoteKeyedStateHandle) keyedStateHandle;
        restorePreviousIncrementalFilesStatus(incrementalRemoteKeyedStateHandle);
        restoreFromRemoteState(incrementalRemoteKeyedStateHandle);
    } else if (keyedStateHandle instanceof IncrementalLocalKeyedStateHandle) {
        IncrementalLocalKeyedStateHandle incrementalLocalKeyedStateHandle =
            (IncrementalLocalKeyedStateHandle) keyedStateHandle;
        restorePreviousIncrementalFilesStatus(incrementalLocalKeyedStateHandle);
        restoreFromLocalState(incrementalLocalKeyedStateHandle);
    } else {
        throw new BackendBuildingException("Unexpected state handle type, " +
            "expected " + IncrementalRemoteKeyedStateHandle.class +
            " or " + IncrementalLocalKeyedStateHandle.class +
            ", but found " + keyedStateHandle.getClass());
    }
}
Example #5
Source File: RocksDBIncrementalRestoreOperation.java From flink with Apache License 2.0

/**
 * Recovery from a single remote incremental state without rescaling.
 */
private void restoreWithoutRescaling(KeyedStateHandle keyedStateHandle) throws Exception {
    if (keyedStateHandle instanceof IncrementalRemoteKeyedStateHandle) {
        IncrementalRemoteKeyedStateHandle incrementalRemoteKeyedStateHandle =
            (IncrementalRemoteKeyedStateHandle) keyedStateHandle;
        restorePreviousIncrementalFilesStatus(incrementalRemoteKeyedStateHandle);
        restoreFromRemoteState(incrementalRemoteKeyedStateHandle);
    } else if (keyedStateHandle instanceof IncrementalLocalKeyedStateHandle) {
        IncrementalLocalKeyedStateHandle incrementalLocalKeyedStateHandle =
            (IncrementalLocalKeyedStateHandle) keyedStateHandle;
        restorePreviousIncrementalFilesStatus(incrementalLocalKeyedStateHandle);
        restoreFromLocalState(incrementalLocalKeyedStateHandle);
    } else {
        throw new BackendBuildingException("Unexpected state handle type, " +
            "expected " + IncrementalRemoteKeyedStateHandle.class +
            " or " + IncrementalLocalKeyedStateHandle.class +
            ", but found " + keyedStateHandle.getClass());
    }
}
Example #6
Source File: RocksDBIncrementalRestoreOperation.java From flink with Apache License 2.0

private IncrementalLocalKeyedStateHandle transferRemoteStateToLocalDirectory(
    Path temporaryRestoreInstancePath,
    IncrementalRemoteKeyedStateHandle restoreStateHandle) throws Exception {

    try (RocksDBStateDownloader rocksDBStateDownloader = new RocksDBStateDownloader(numberOfTransferringThreads)) {
        rocksDBStateDownloader.transferAllStateDataToDirectory(
            restoreStateHandle,
            temporaryRestoreInstancePath,
            cancelStreamRegistry);
    }

    // since we transferred all remote state to a local directory, we can use the same code as for
    // local recovery.
    return new IncrementalLocalKeyedStateHandle(
        restoreStateHandle.getBackendIdentifier(),
        restoreStateHandle.getCheckpointId(),
        new DirectoryStateHandle(temporaryRestoreInstancePath),
        restoreStateHandle.getKeyGroupRange(),
        restoreStateHandle.getMetaStateHandle(),
        restoreStateHandle.getSharedState().keySet());
}
Example #7
Source File: RocksDBIncrementalRestoreOperation.java From flink with Apache License 2.0

private void initDBWithRescaling(KeyedStateHandle initialHandle) throws Exception {

    assert (initialHandle instanceof IncrementalRemoteKeyedStateHandle);

    // 1. Restore base DB from selected initial handle
    restoreFromRemoteState((IncrementalRemoteKeyedStateHandle) initialHandle);

    // 2. Clip the base DB instance
    try {
        RocksDBIncrementalCheckpointUtils.clipDBWithKeyGroupRange(
            db,
            columnFamilyHandles,
            keyGroupRange,
            initialHandle.getKeyGroupRange(),
            keyGroupPrefixBytes,
            writeBatchSize);
    } catch (RocksDBException e) {
        String errMsg = "Failed to clip DB after initialization.";
        LOG.error(errMsg, e);
        throw new BackendBuildingException(errMsg, e);
    }
}
Example #8
Source File: RocksDBIncrementalRestoreOperation.java From Flink-CEPplus with Apache License 2.0

private void initDBWithRescaling(KeyedStateHandle initialHandle) throws Exception {

    assert (initialHandle instanceof IncrementalRemoteKeyedStateHandle);

    // 1. Restore base DB from selected initial handle
    restoreFromRemoteState((IncrementalRemoteKeyedStateHandle) initialHandle);

    // 2. Clip the base DB instance
    try {
        RocksDBIncrementalCheckpointUtils.clipDBWithKeyGroupRange(
            db,
            columnFamilyHandles,
            keyGroupRange,
            initialHandle.getKeyGroupRange(),
            keyGroupPrefixBytes);
    } catch (RocksDBException e) {
        String errMsg = "Failed to clip DB after initialization.";
        LOG.error(errMsg, e);
        throw new BackendBuildingException(errMsg, e);
    }
}
Example #9
Source File: RocksDBIncrementalRestoreOperation.java From Flink-CEPplus with Apache License 2.0

private IncrementalLocalKeyedStateHandle transferRemoteStateToLocalDirectory(
    Path temporaryRestoreInstancePath,
    IncrementalRemoteKeyedStateHandle restoreStateHandle) throws Exception {

    try (RocksDBStateDownloader rocksDBStateDownloader = new RocksDBStateDownloader(numberOfTransferringThreads)) {
        rocksDBStateDownloader.transferAllStateDataToDirectory(
            restoreStateHandle,
            temporaryRestoreInstancePath,
            cancelStreamRegistry);
    }

    // since we transferred all remote state to a local directory, we can use the same code as for
    // local recovery.
    return new IncrementalLocalKeyedStateHandle(
        restoreStateHandle.getBackendIdentifier(),
        restoreStateHandle.getCheckpointId(),
        new DirectoryStateHandle(temporaryRestoreInstancePath),
        restoreStateHandle.getKeyGroupRange(),
        restoreStateHandle.getMetaStateHandle(),
        restoreStateHandle.getSharedState().keySet());
}
Example #10
Source File: RocksDBIncrementalRestoreOperation.java From flink with Apache License 2.0

private RestoredDBInstance restoreDBInstanceFromStateHandle(
    IncrementalRemoteKeyedStateHandle restoreStateHandle,
    Path temporaryRestoreInstancePath) throws Exception {

    try (RocksDBStateDownloader rocksDBStateDownloader = new RocksDBStateDownloader(numberOfTransferringThreads)) {
        rocksDBStateDownloader.transferAllStateDataToDirectory(
            restoreStateHandle,
            temporaryRestoreInstancePath,
            cancelStreamRegistry);
    }

    KeyedBackendSerializationProxy<K> serializationProxy =
        readMetaData(restoreStateHandle.getMetaStateHandle());

    // read meta data
    List<StateMetaInfoSnapshot> stateMetaInfoSnapshots =
        serializationProxy.getStateMetaInfoSnapshots();

    List<ColumnFamilyDescriptor> columnFamilyDescriptors =
        createAndRegisterColumnFamilyDescriptors(stateMetaInfoSnapshots, false);

    List<ColumnFamilyHandle> columnFamilyHandles =
        new ArrayList<>(stateMetaInfoSnapshots.size() + 1);

    RocksDB restoreDb = RocksDBOperationUtils.openDB(
        temporaryRestoreInstancePath.toString(),
        columnFamilyDescriptors,
        columnFamilyHandles,
        RocksDBOperationUtils.createColumnFamilyOptions(columnFamilyOptionsFactory, "default"),
        dbOptions);

    return new RestoredDBInstance(restoreDb, columnFamilyHandles, columnFamilyDescriptors, stateMetaInfoSnapshots);
}
Example #11
Source File: RocksDBIncrementalRestoreOperation.java From flink with Apache License 2.0

private void restoreFromRemoteState(IncrementalRemoteKeyedStateHandle stateHandle) throws Exception {
    // used as restore source for IncrementalRemoteKeyedStateHandle
    final Path tmpRestoreInstancePath =
        instanceBasePath.getAbsoluteFile().toPath().resolve(UUID.randomUUID().toString());
    try {
        restoreFromLocalState(
            transferRemoteStateToLocalDirectory(tmpRestoreInstancePath, stateHandle));
    } finally {
        cleanUpPathQuietly(tmpRestoreInstancePath);
    }
}
Example #12
Source File: MetadataV2V3SerializerBase.java From flink with Apache License 2.0

void serializeKeyedStateHandle(KeyedStateHandle stateHandle, DataOutputStream dos) throws IOException {

    if (stateHandle == null) {
        dos.writeByte(NULL_HANDLE);
    } else if (stateHandle instanceof KeyGroupsStateHandle) {
        KeyGroupsStateHandle keyGroupsStateHandle = (KeyGroupsStateHandle) stateHandle;

        dos.writeByte(KEY_GROUPS_HANDLE);
        dos.writeInt(keyGroupsStateHandle.getKeyGroupRange().getStartKeyGroup());
        dos.writeInt(keyGroupsStateHandle.getKeyGroupRange().getNumberOfKeyGroups());
        for (int keyGroup : keyGroupsStateHandle.getKeyGroupRange()) {
            dos.writeLong(keyGroupsStateHandle.getOffsetForKeyGroup(keyGroup));
        }
        serializeStreamStateHandle(keyGroupsStateHandle.getDelegateStateHandle(), dos);
    } else if (stateHandle instanceof IncrementalRemoteKeyedStateHandle) {
        IncrementalRemoteKeyedStateHandle incrementalKeyedStateHandle =
            (IncrementalRemoteKeyedStateHandle) stateHandle;

        dos.writeByte(INCREMENTAL_KEY_GROUPS_HANDLE);

        dos.writeLong(incrementalKeyedStateHandle.getCheckpointId());
        dos.writeUTF(String.valueOf(incrementalKeyedStateHandle.getBackendIdentifier()));
        dos.writeInt(incrementalKeyedStateHandle.getKeyGroupRange().getStartKeyGroup());
        dos.writeInt(incrementalKeyedStateHandle.getKeyGroupRange().getNumberOfKeyGroups());

        serializeStreamStateHandle(incrementalKeyedStateHandle.getMetaStateHandle(), dos);
        serializeStreamStateHandleMap(incrementalKeyedStateHandle.getSharedState(), dos);
        serializeStreamStateHandleMap(incrementalKeyedStateHandle.getPrivateState(), dos);
    } else {
        throw new IllegalStateException("Unknown KeyedStateHandle type: " + stateHandle.getClass());
    }
}
Example #13
Source File: RocksDBStateDownloader.java From flink with Apache License 2.0

/**
 * Transfer all state data to the target directory using the specified number of threads.
 *
 * @param restoreStateHandle Handles used to retrieve the state data.
 * @param dest The target directory in which the state data will be stored.
 *
 * @throws Exception Thrown if the state data cannot be transferred.
 */
public void transferAllStateDataToDirectory(
    IncrementalRemoteKeyedStateHandle restoreStateHandle,
    Path dest,
    CloseableRegistry closeableRegistry) throws Exception {

    final Map<StateHandleID, StreamStateHandle> sstFiles =
        restoreStateHandle.getSharedState();
    final Map<StateHandleID, StreamStateHandle> miscFiles =
        restoreStateHandle.getPrivateState();

    downloadDataForAllStateHandles(sstFiles, dest, closeableRegistry);
    downloadDataForAllStateHandles(miscFiles, dest, closeableRegistry);
}
Example #14
Source File: CheckpointTestUtils.java From flink with Apache License 2.0

public static IncrementalRemoteKeyedStateHandle createDummyIncrementalKeyedStateHandle(Random rnd) {
    return new IncrementalRemoteKeyedStateHandle(
        createRandomUUID(rnd),
        new KeyGroupRange(1, 1),
        42L,
        createRandomStateHandleMap(rnd),
        createRandomStateHandleMap(rnd),
        createDummyStreamStateHandle(rnd));
}
Example #15
Source File: SavepointV2Serializer.java From flink with Apache License 2.0

@VisibleForTesting
public static void serializeKeyedStateHandle(
    KeyedStateHandle stateHandle, DataOutputStream dos) throws IOException {

    if (stateHandle == null) {
        dos.writeByte(NULL_HANDLE);
    } else if (stateHandle instanceof KeyGroupsStateHandle) {
        KeyGroupsStateHandle keyGroupsStateHandle = (KeyGroupsStateHandle) stateHandle;

        dos.writeByte(KEY_GROUPS_HANDLE);
        dos.writeInt(keyGroupsStateHandle.getKeyGroupRange().getStartKeyGroup());
        dos.writeInt(keyGroupsStateHandle.getKeyGroupRange().getNumberOfKeyGroups());
        for (int keyGroup : keyGroupsStateHandle.getKeyGroupRange()) {
            dos.writeLong(keyGroupsStateHandle.getOffsetForKeyGroup(keyGroup));
        }
        serializeStreamStateHandle(keyGroupsStateHandle.getDelegateStateHandle(), dos);
    } else if (stateHandle instanceof IncrementalRemoteKeyedStateHandle) {
        IncrementalRemoteKeyedStateHandle incrementalKeyedStateHandle =
            (IncrementalRemoteKeyedStateHandle) stateHandle;

        dos.writeByte(INCREMENTAL_KEY_GROUPS_HANDLE);

        dos.writeLong(incrementalKeyedStateHandle.getCheckpointId());
        dos.writeUTF(String.valueOf(incrementalKeyedStateHandle.getBackendIdentifier()));
        dos.writeInt(incrementalKeyedStateHandle.getKeyGroupRange().getStartKeyGroup());
        dos.writeInt(incrementalKeyedStateHandle.getKeyGroupRange().getNumberOfKeyGroups());

        serializeStreamStateHandle(incrementalKeyedStateHandle.getMetaStateHandle(), dos);
        serializeStreamStateHandleMap(incrementalKeyedStateHandle.getSharedState(), dos);
        serializeStreamStateHandleMap(incrementalKeyedStateHandle.getPrivateState(), dos);
    } else {
        throw new IllegalStateException("Unknown KeyedStateHandle type: " + stateHandle.getClass());
    }
}
Example #16
Source File: RocksDBIncrementalRestoreOperation.java From flink with Apache License 2.0

private RestoredDBInstance restoreDBInstanceFromStateHandle(
    IncrementalRemoteKeyedStateHandle restoreStateHandle,
    Path temporaryRestoreInstancePath) throws Exception {

    try (RocksDBStateDownloader rocksDBStateDownloader = new RocksDBStateDownloader(numberOfTransferringThreads)) {
        rocksDBStateDownloader.transferAllStateDataToDirectory(
            restoreStateHandle,
            temporaryRestoreInstancePath,
            cancelStreamRegistry);
    }

    KeyedBackendSerializationProxy<K> serializationProxy =
        readMetaData(restoreStateHandle.getMetaStateHandle());

    // read meta data
    List<StateMetaInfoSnapshot> stateMetaInfoSnapshots =
        serializationProxy.getStateMetaInfoSnapshots();

    List<ColumnFamilyDescriptor> columnFamilyDescriptors =
        createAndRegisterColumnFamilyDescriptors(stateMetaInfoSnapshots, false);

    List<ColumnFamilyHandle> columnFamilyHandles =
        new ArrayList<>(stateMetaInfoSnapshots.size() + 1);

    RocksDB restoreDb = RocksDBOperationUtils.openDB(
        temporaryRestoreInstancePath.getPath(),
        columnFamilyDescriptors,
        columnFamilyHandles,
        RocksDBOperationUtils.createColumnFamilyOptions(columnFamilyOptionsFactory, "default"),
        dbOptions);

    return new RestoredDBInstance(restoreDb, columnFamilyHandles, columnFamilyDescriptors, stateMetaInfoSnapshots);
}
Example #17
Source File: RocksDBStateDownloader.java From Flink-CEPplus with Apache License 2.0

/**
 * Transfer all state data to the target directory using the specified number of threads.
 *
 * @param restoreStateHandle Handles used to retrieve the state data.
 * @param dest The target directory in which the state data will be stored.
 *
 * @throws Exception Thrown if the state data cannot be transferred.
 */
public void transferAllStateDataToDirectory(
    IncrementalRemoteKeyedStateHandle restoreStateHandle,
    Path dest,
    CloseableRegistry closeableRegistry) throws Exception {

    final Map<StateHandleID, StreamStateHandle> sstFiles =
        restoreStateHandle.getSharedState();
    final Map<StateHandleID, StreamStateHandle> miscFiles =
        restoreStateHandle.getPrivateState();

    downloadDataForAllStateHandles(sstFiles, dest, closeableRegistry);
    downloadDataForAllStateHandles(miscFiles, dest, closeableRegistry);
}
Example #18
Source File: CheckpointTestUtils.java From flink with Apache License 2.0

public static IncrementalRemoteKeyedStateHandle createDummyIncrementalKeyedStateHandle(Random rnd) {
    return new IncrementalRemoteKeyedStateHandle(
        createRandomUUID(rnd),
        new KeyGroupRange(1, 1),
        42L,
        createRandomStateHandleMap(rnd),
        createRandomStateHandleMap(rnd),
        createDummyStreamStateHandle(rnd, null));
}
Example #19
Source File: RocksDBIncrementalRestoreOperation.java From flink with Apache License 2.0

private void restoreFromRemoteState(IncrementalRemoteKeyedStateHandle stateHandle) throws Exception {
    final Path tmpRestoreInstancePath = new Path(
        instanceBasePath.getAbsolutePath(),
        UUID.randomUUID().toString()); // used as restore source for IncrementalRemoteKeyedStateHandle
    try {
        restoreFromLocalState(
            transferRemoteStateToLocalDirectory(tmpRestoreInstancePath, stateHandle));
    } finally {
        cleanUpPathQuietly(tmpRestoreInstancePath);
    }
}
Example #20
Source File: RocksDBIncrementalRestoreOperation.java From Flink-CEPplus with Apache License 2.0

private void restoreFromRemoteState(IncrementalRemoteKeyedStateHandle stateHandle) throws Exception {
    final Path tmpRestoreInstancePath = new Path(
        instanceBasePath.getAbsolutePath(),
        UUID.randomUUID().toString()); // used as restore source for IncrementalRemoteKeyedStateHandle
    try {
        restoreFromLocalState(
            transferRemoteStateToLocalDirectory(tmpRestoreInstancePath, stateHandle));
    } finally {
        cleanUpPathQuietly(tmpRestoreInstancePath);
    }
}
Example #21
Source File: RocksDBIncrementalRestoreOperation.java From Flink-CEPplus with Apache License 2.0

private RestoredDBInstance restoreDBInstanceFromStateHandle(
    IncrementalRemoteKeyedStateHandle restoreStateHandle,
    Path temporaryRestoreInstancePath) throws Exception {

    try (RocksDBStateDownloader rocksDBStateDownloader = new RocksDBStateDownloader(numberOfTransferringThreads)) {
        rocksDBStateDownloader.transferAllStateDataToDirectory(
            restoreStateHandle,
            temporaryRestoreInstancePath,
            cancelStreamRegistry);
    }

    KeyedBackendSerializationProxy<K> serializationProxy =
        readMetaData(restoreStateHandle.getMetaStateHandle());

    // read meta data
    List<StateMetaInfoSnapshot> stateMetaInfoSnapshots =
        serializationProxy.getStateMetaInfoSnapshots();

    List<ColumnFamilyDescriptor> columnFamilyDescriptors =
        createAndRegisterColumnFamilyDescriptors(stateMetaInfoSnapshots, false);

    List<ColumnFamilyHandle> columnFamilyHandles =
        new ArrayList<>(stateMetaInfoSnapshots.size() + 1);

    RocksDB restoreDb = RocksDBOperationUtils.openDB(
        temporaryRestoreInstancePath.getPath(),
        columnFamilyDescriptors,
        columnFamilyHandles,
        RocksDBOperationUtils.createColumnFamilyOptions(columnFamilyOptionsFactory, "default"),
        dbOptions);

    return new RestoredDBInstance(restoreDb, columnFamilyHandles, columnFamilyDescriptors, stateMetaInfoSnapshots);
}
Example #22
Source File: SavepointV2Serializer.java From Flink-CEPplus with Apache License 2.0

@VisibleForTesting
public static void serializeKeyedStateHandle(
    KeyedStateHandle stateHandle, DataOutputStream dos) throws IOException {

    if (stateHandle == null) {
        dos.writeByte(NULL_HANDLE);
    } else if (stateHandle instanceof KeyGroupsStateHandle) {
        KeyGroupsStateHandle keyGroupsStateHandle = (KeyGroupsStateHandle) stateHandle;

        dos.writeByte(KEY_GROUPS_HANDLE);
        dos.writeInt(keyGroupsStateHandle.getKeyGroupRange().getStartKeyGroup());
        dos.writeInt(keyGroupsStateHandle.getKeyGroupRange().getNumberOfKeyGroups());
        for (int keyGroup : keyGroupsStateHandle.getKeyGroupRange()) {
            dos.writeLong(keyGroupsStateHandle.getOffsetForKeyGroup(keyGroup));
        }
        serializeStreamStateHandle(keyGroupsStateHandle.getDelegateStateHandle(), dos);
    } else if (stateHandle instanceof IncrementalRemoteKeyedStateHandle) {
        IncrementalRemoteKeyedStateHandle incrementalKeyedStateHandle =
            (IncrementalRemoteKeyedStateHandle) stateHandle;

        dos.writeByte(INCREMENTAL_KEY_GROUPS_HANDLE);

        dos.writeLong(incrementalKeyedStateHandle.getCheckpointId());
        dos.writeUTF(String.valueOf(incrementalKeyedStateHandle.getBackendIdentifier()));
        dos.writeInt(incrementalKeyedStateHandle.getKeyGroupRange().getStartKeyGroup());
        dos.writeInt(incrementalKeyedStateHandle.getKeyGroupRange().getNumberOfKeyGroups());

        serializeStreamStateHandle(incrementalKeyedStateHandle.getMetaStateHandle(), dos);
        serializeStreamStateHandleMap(incrementalKeyedStateHandle.getSharedState(), dos);
        serializeStreamStateHandleMap(incrementalKeyedStateHandle.getPrivateState(), dos);
    } else {
        throw new IllegalStateException("Unknown KeyedStateHandle type: " + stateHandle.getClass());
    }
}
Example #23
Source File: CheckpointTestUtils.java From Flink-CEPplus with Apache License 2.0

public static IncrementalRemoteKeyedStateHandle createDummyIncrementalKeyedStateHandle(Random rnd) {
    return new IncrementalRemoteKeyedStateHandle(
        createRandomUUID(rnd),
        new KeyGroupRange(1, 1),
        42L,
        createRandomStateHandleMap(rnd),
        createRandomStateHandleMap(rnd),
        createDummyStreamStateHandle(rnd));
}
Example #24
Source File: RocksDBStateDownloader.java From flink with Apache License 2.0

/**
 * Transfer all state data to the target directory using the specified number of threads.
 *
 * @param restoreStateHandle Handles used to retrieve the state data.
 * @param dest The target directory in which the state data will be stored.
 *
 * @throws Exception Thrown if the state data cannot be transferred.
 */
public void transferAllStateDataToDirectory(
    IncrementalRemoteKeyedStateHandle restoreStateHandle,
    Path dest,
    CloseableRegistry closeableRegistry) throws Exception {

    final Map<StateHandleID, StreamStateHandle> sstFiles =
        restoreStateHandle.getSharedState();
    final Map<StateHandleID, StreamStateHandle> miscFiles =
        restoreStateHandle.getPrivateState();

    downloadDataForAllStateHandles(sstFiles, dest, closeableRegistry);
    downloadDataForAllStateHandles(miscFiles, dest, closeableRegistry);
}
Example #25
Source File: RocksDBIncrementalRestoreOperation.java From flink with Apache License 2.0

/**
 * Recovery from multiple incremental states with rescaling. For rescaling, this method creates a temporary
 * RocksDB instance for a key-groups shard. All contents from the temporary instance are copied into the
 * real restore instance and then the temporary instance is discarded.
 */
private void restoreWithRescaling(Collection<KeyedStateHandle> restoreStateHandles) throws Exception {

    // Prepare for restore with rescaling
    KeyedStateHandle initialHandle = RocksDBIncrementalCheckpointUtils.chooseTheBestStateHandleForInitial(
        restoreStateHandles, keyGroupRange);

    // Init base DB instance
    if (initialHandle != null) {
        restoreStateHandles.remove(initialHandle);
        initDBWithRescaling(initialHandle);
    } else {
        openDB();
    }

    // Transfer remaining key-groups from temporary instance into base DB
    byte[] startKeyGroupPrefixBytes = new byte[keyGroupPrefixBytes];
    RocksDBKeySerializationUtils.serializeKeyGroup(keyGroupRange.getStartKeyGroup(), startKeyGroupPrefixBytes);

    byte[] stopKeyGroupPrefixBytes = new byte[keyGroupPrefixBytes];
    RocksDBKeySerializationUtils.serializeKeyGroup(keyGroupRange.getEndKeyGroup() + 1, stopKeyGroupPrefixBytes);

    for (KeyedStateHandle rawStateHandle : restoreStateHandles) {

        if (!(rawStateHandle instanceof IncrementalRemoteKeyedStateHandle)) {
            throw new IllegalStateException("Unexpected state handle type, " +
                "expected " + IncrementalRemoteKeyedStateHandle.class +
                ", but found " + rawStateHandle.getClass());
        }

        Path temporaryRestoreInstancePath =
            instanceBasePath.getAbsoluteFile().toPath().resolve(UUID.randomUUID().toString());
        try (RestoredDBInstance tmpRestoreDBInfo = restoreDBInstanceFromStateHandle(
                (IncrementalRemoteKeyedStateHandle) rawStateHandle,
                temporaryRestoreInstancePath);
            RocksDBWriteBatchWrapper writeBatchWrapper = new RocksDBWriteBatchWrapper(this.db, writeBatchSize)) {

            List<ColumnFamilyDescriptor> tmpColumnFamilyDescriptors = tmpRestoreDBInfo.columnFamilyDescriptors;
            List<ColumnFamilyHandle> tmpColumnFamilyHandles = tmpRestoreDBInfo.columnFamilyHandles;

            // iterating only the requested descriptors automatically skips the default column family handle
            for (int i = 0; i < tmpColumnFamilyDescriptors.size(); ++i) {
                ColumnFamilyHandle tmpColumnFamilyHandle = tmpColumnFamilyHandles.get(i);

                ColumnFamilyHandle targetColumnFamilyHandle = getOrRegisterStateColumnFamilyHandle(
                    null, tmpRestoreDBInfo.stateMetaInfoSnapshots.get(i))
                    .columnFamilyHandle;

                try (RocksIteratorWrapper iterator = RocksDBOperationUtils.getRocksIterator(
                        tmpRestoreDBInfo.db, tmpColumnFamilyHandle, tmpRestoreDBInfo.readOptions)) {

                    iterator.seek(startKeyGroupPrefixBytes);

                    while (iterator.isValid()) {

                        if (RocksDBIncrementalCheckpointUtils.beforeThePrefixBytes(iterator.key(), stopKeyGroupPrefixBytes)) {
                            writeBatchWrapper.put(targetColumnFamilyHandle, iterator.key(), iterator.value());
                        } else {
                            // Since the iterator will visit the record according to the sorted order,
                            // we can just break here.
                            break;
                        }

                        iterator.next();
                    }
                } // releases native iterator resources
            }
        } finally {
            cleanUpPathQuietly(temporaryRestoreInstancePath);
        }
    }
}
Example #26
Source File: RocksDBIncrementalRestoreOperation.java From flink with Apache License 2.0

/**
 * Recovery from multiple incremental states with rescaling. For rescaling, this method creates a temporary
 * RocksDB instance for a key-groups shard. All contents from the temporary instance are copied into the
 * real restore instance and then the temporary instance is discarded.
 */
private void restoreWithRescaling(Collection<KeyedStateHandle> restoreStateHandles) throws Exception {

    // Prepare for restore with rescaling
    KeyedStateHandle initialHandle = RocksDBIncrementalCheckpointUtils.chooseTheBestStateHandleForInitial(
        restoreStateHandles, keyGroupRange);

    // Init base DB instance
    if (initialHandle != null) {
        restoreStateHandles.remove(initialHandle);
        initDBWithRescaling(initialHandle);
    } else {
        openDB();
    }

    // Transfer remaining key-groups from temporary instance into base DB
    byte[] startKeyGroupPrefixBytes = new byte[keyGroupPrefixBytes];
    RocksDBKeySerializationUtils.serializeKeyGroup(keyGroupRange.getStartKeyGroup(), startKeyGroupPrefixBytes);

    byte[] stopKeyGroupPrefixBytes = new byte[keyGroupPrefixBytes];
    RocksDBKeySerializationUtils.serializeKeyGroup(keyGroupRange.getEndKeyGroup() + 1, stopKeyGroupPrefixBytes);

    for (KeyedStateHandle rawStateHandle : restoreStateHandles) {

        if (!(rawStateHandle instanceof IncrementalRemoteKeyedStateHandle)) {
            throw new IllegalStateException("Unexpected state handle type, " +
                "expected " + IncrementalRemoteKeyedStateHandle.class +
                ", but found " + rawStateHandle.getClass());
        }

        Path temporaryRestoreInstancePath = new Path(instanceBasePath.getAbsolutePath() + UUID.randomUUID().toString());
        try (RestoredDBInstance tmpRestoreDBInfo = restoreDBInstanceFromStateHandle(
                (IncrementalRemoteKeyedStateHandle) rawStateHandle,
                temporaryRestoreInstancePath);
            RocksDBWriteBatchWrapper writeBatchWrapper = new RocksDBWriteBatchWrapper(this.db)) {

            List<ColumnFamilyDescriptor> tmpColumnFamilyDescriptors = tmpRestoreDBInfo.columnFamilyDescriptors;
            List<ColumnFamilyHandle> tmpColumnFamilyHandles = tmpRestoreDBInfo.columnFamilyHandles;

            // iterating only the requested descriptors automatically skips the default column family handle
            for (int i = 0; i < tmpColumnFamilyDescriptors.size(); ++i) {
                ColumnFamilyHandle tmpColumnFamilyHandle = tmpColumnFamilyHandles.get(i);

                ColumnFamilyHandle targetColumnFamilyHandle = getOrRegisterStateColumnFamilyHandle(
                    null, tmpRestoreDBInfo.stateMetaInfoSnapshots.get(i))
                    .columnFamilyHandle;

                try (RocksIteratorWrapper iterator = RocksDBOperationUtils.getRocksIterator(tmpRestoreDBInfo.db, tmpColumnFamilyHandle)) {

                    iterator.seek(startKeyGroupPrefixBytes);

                    while (iterator.isValid()) {

                        if (RocksDBIncrementalCheckpointUtils.beforeThePrefixBytes(iterator.key(), stopKeyGroupPrefixBytes)) {
                            writeBatchWrapper.put(targetColumnFamilyHandle, iterator.key(), iterator.value());
                        } else {
                            // Since the iterator will visit the record according to the sorted order,
                            // we can just break here.
                            break;
                        }

                        iterator.next();
                    }
                } // releases native iterator resources
            }
        } finally {
            cleanUpPathQuietly(temporaryRestoreInstancePath);
        }
    }
}
Example #27
Source File: CheckpointCoordinatorTest.java From flink with Apache License 2.0

private void performIncrementalCheckpoint(
    JobID jid,
    CheckpointCoordinator coord,
    ExecutionJobVertex jobVertex1,
    List<KeyGroupRange> keyGroupPartitions1,
    int cpSequenceNumber) throws Exception {

    // trigger the checkpoint
    coord.triggerCheckpoint(false);
    manuallyTriggeredScheduledExecutor.triggerAll();

    assertEquals(1, coord.getPendingCheckpoints().size());
    long checkpointId = Iterables.getOnlyElement(coord.getPendingCheckpoints().keySet());

    for (int index = 0; index < jobVertex1.getParallelism(); index++) {

        KeyGroupRange keyGroupRange = keyGroupPartitions1.get(index);

        Map<StateHandleID, StreamStateHandle> privateState = new HashMap<>();
        privateState.put(
            new StateHandleID("private-1"),
            spy(new ByteStreamStateHandle("private-1", new byte[]{'p'})));

        Map<StateHandleID, StreamStateHandle> sharedState = new HashMap<>();

        // let all but the first CP overlap by one shared state.
        if (cpSequenceNumber > 0) {
            sharedState.put(
                new StateHandleID("shared-" + (cpSequenceNumber - 1)),
                spy(new PlaceholderStreamStateHandle()));
        }

        sharedState.put(
            new StateHandleID("shared-" + cpSequenceNumber),
            spy(new ByteStreamStateHandle("shared-" + cpSequenceNumber + "-" + keyGroupRange, new byte[]{'s'})));

        IncrementalRemoteKeyedStateHandle managedState = spy(new IncrementalRemoteKeyedStateHandle(
            new UUID(42L, 42L),
            keyGroupRange,
            checkpointId,
            sharedState,
            privateState,
            spy(new ByteStreamStateHandle("meta", new byte[]{'m'}))));

        OperatorSubtaskState operatorSubtaskState = spy(new OperatorSubtaskState(
            StateObjectCollection.empty(),
            StateObjectCollection.empty(),
            StateObjectCollection.singleton(managedState),
            StateObjectCollection.empty()));

        Map<OperatorID, OperatorSubtaskState> opStates = new HashMap<>();

        opStates.put(jobVertex1.getOperatorIDs().get(0).getGeneratedOperatorID(), operatorSubtaskState);

        TaskStateSnapshot taskStateSnapshot = new TaskStateSnapshot(opStates);

        AcknowledgeCheckpoint acknowledgeCheckpoint = new AcknowledgeCheckpoint(
            jid,
            jobVertex1.getTaskVertices()[index].getCurrentExecutionAttempt().getAttemptId(),
            checkpointId,
            new CheckpointMetrics(),
            taskStateSnapshot);

        coord.receiveAcknowledgeMessage(acknowledgeCheckpoint, TASK_MANAGER_LOCATION_INFO);
    }
}
Example #28
Source File: RocksDBIncrementalRestoreOperation.java From Flink-CEPplus with Apache License 2.0

/**
 * Recovery from multiple incremental states with rescaling. For rescaling, this method creates a temporary
 * RocksDB instance for a key-groups shard. All contents from the temporary instance are copied into the
 * real restore instance and then the temporary instance is discarded.
 */
private void restoreWithRescaling(Collection<KeyedStateHandle> restoreStateHandles) throws Exception {

    // Prepare for restore with rescaling
    KeyedStateHandle initialHandle = RocksDBIncrementalCheckpointUtils.chooseTheBestStateHandleForInitial(
        restoreStateHandles, keyGroupRange);

    // Init base DB instance
    if (initialHandle != null) {
        restoreStateHandles.remove(initialHandle);
        initDBWithRescaling(initialHandle);
    } else {
        openDB();
    }

    // Transfer remaining key-groups from temporary instance into base DB
    byte[] startKeyGroupPrefixBytes = new byte[keyGroupPrefixBytes];
    RocksDBKeySerializationUtils.serializeKeyGroup(keyGroupRange.getStartKeyGroup(), startKeyGroupPrefixBytes);

    byte[] stopKeyGroupPrefixBytes = new byte[keyGroupPrefixBytes];
    RocksDBKeySerializationUtils.serializeKeyGroup(keyGroupRange.getEndKeyGroup() + 1, stopKeyGroupPrefixBytes);

    for (KeyedStateHandle rawStateHandle : restoreStateHandles) {

        if (!(rawStateHandle instanceof IncrementalRemoteKeyedStateHandle)) {
            throw new IllegalStateException("Unexpected state handle type, " +
                "expected " + IncrementalRemoteKeyedStateHandle.class +
                ", but found " + rawStateHandle.getClass());
        }

        Path temporaryRestoreInstancePath = new Path(instanceBasePath.getAbsolutePath() + UUID.randomUUID().toString());
        try (RestoredDBInstance tmpRestoreDBInfo = restoreDBInstanceFromStateHandle(
                (IncrementalRemoteKeyedStateHandle) rawStateHandle,
                temporaryRestoreInstancePath);
            RocksDBWriteBatchWrapper writeBatchWrapper = new RocksDBWriteBatchWrapper(this.db)) {

            List<ColumnFamilyDescriptor> tmpColumnFamilyDescriptors = tmpRestoreDBInfo.columnFamilyDescriptors;
            List<ColumnFamilyHandle> tmpColumnFamilyHandles = tmpRestoreDBInfo.columnFamilyHandles;

            // iterating only the requested descriptors automatically skips the default column family handle
            for (int i = 0; i < tmpColumnFamilyDescriptors.size(); ++i) {
                ColumnFamilyHandle tmpColumnFamilyHandle = tmpColumnFamilyHandles.get(i);

                ColumnFamilyHandle targetColumnFamilyHandle = getOrRegisterStateColumnFamilyHandle(
                    null, tmpRestoreDBInfo.stateMetaInfoSnapshots.get(i))
                    .columnFamilyHandle;

                try (RocksIteratorWrapper iterator = RocksDBOperationUtils.getRocksIterator(tmpRestoreDBInfo.db, tmpColumnFamilyHandle)) {

                    iterator.seek(startKeyGroupPrefixBytes);

                    while (iterator.isValid()) {

                        if (RocksDBIncrementalCheckpointUtils.beforeThePrefixBytes(iterator.key(), stopKeyGroupPrefixBytes)) {
                            writeBatchWrapper.put(targetColumnFamilyHandle, iterator.key(), iterator.value());
                        } else {
                            // Since the iterator will visit the record according to the sorted order,
                            // we can just break here.
                            break;
                        }

                        iterator.next();
                    }
                } // releases native iterator resources
            }
        } finally {
            cleanUpPathQuietly(temporaryRestoreInstancePath);
        }
    }
}
Example #29
Source File: RocksDBStateBackendTest.java From flink with Apache License 2.0

@Test
public void testSharedIncrementalStateDeRegistration() throws Exception {
    if (enableIncrementalCheckpointing) {
        AbstractKeyedStateBackend<Integer> backend = createKeyedBackend(IntSerializer.INSTANCE);
        try {
            ValueStateDescriptor<String> kvId =
                new ValueStateDescriptor<>("id", String.class, null);

            kvId.initializeSerializerUnlessSet(new ExecutionConfig());

            ValueState<String> state =
                backend.getPartitionedState(VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, kvId);

            Queue<IncrementalRemoteKeyedStateHandle> previousStateHandles = new LinkedList<>();
            SharedStateRegistry sharedStateRegistry = spy(new SharedStateRegistry());

            for (int checkpointId = 0; checkpointId < 3; ++checkpointId) {

                reset(sharedStateRegistry);

                backend.setCurrentKey(checkpointId);
                state.update("Hello-" + checkpointId);

                RunnableFuture<SnapshotResult<KeyedStateHandle>> snapshot = backend.snapshot(
                    checkpointId,
                    checkpointId,
                    createStreamFactory(),
                    CheckpointOptions.forCheckpointWithDefaultLocation());

                snapshot.run();

                SnapshotResult<KeyedStateHandle> snapshotResult = snapshot.get();

                IncrementalRemoteKeyedStateHandle stateHandle =
                    (IncrementalRemoteKeyedStateHandle) snapshotResult.getJobManagerOwnedSnapshot();

                Map<StateHandleID, StreamStateHandle> sharedState =
                    new HashMap<>(stateHandle.getSharedState());

                stateHandle.registerSharedStates(sharedStateRegistry);

                for (Map.Entry<StateHandleID, StreamStateHandle> e : sharedState.entrySet()) {
                    verify(sharedStateRegistry).registerReference(
                        stateHandle.createSharedStateRegistryKeyFromFileName(e.getKey()),
                        e.getValue());
                }

                previousStateHandles.add(stateHandle);
                backend.notifyCheckpointComplete(checkpointId);

                //-----------------------------------------------------------------

                if (previousStateHandles.size() > 1) {
                    checkRemove(previousStateHandles.remove(), sharedStateRegistry);
                }
            }

            while (!previousStateHandles.isEmpty()) {

                reset(sharedStateRegistry);

                checkRemove(previousStateHandles.remove(), sharedStateRegistry);
            }
        } finally {
            IOUtils.closeQuietly(backend);
            backend.dispose();
        }
    }
}
Example #30
Source File: RocksDBStateDownloaderTest.java From flink with Apache License 2.0

/**
 * Tests that downloading files with multiple threads works correctly.
 */
@Test
public void testMultiThreadRestoreCorrectly() throws Exception {
    Random random = new Random();
    int contentNum = 6;
    byte[][] contents = new byte[contentNum][];
    for (int i = 0; i < contentNum; ++i) {
        contents[i] = new byte[random.nextInt(100000) + 1];
        random.nextBytes(contents[i]);
    }

    List<StreamStateHandle> handles = new ArrayList<>(contentNum);
    for (int i = 0; i < contentNum; ++i) {
        handles.add(new ByteStreamStateHandle(String.format("state%d", i), contents[i]));
    }

    Map<StateHandleID, StreamStateHandle> sharedStates = new HashMap<>(contentNum);
    Map<StateHandleID, StreamStateHandle> privateStates = new HashMap<>(contentNum);
    for (int i = 0; i < contentNum; ++i) {
        sharedStates.put(new StateHandleID(String.format("sharedState%d", i)), handles.get(i));
        privateStates.put(new StateHandleID(String.format("privateState%d", i)), handles.get(i));
    }

    IncrementalRemoteKeyedStateHandle incrementalKeyedStateHandle =
        new IncrementalRemoteKeyedStateHandle(
            UUID.randomUUID(),
            KeyGroupRange.of(0, 1),
            1,
            sharedStates,
            privateStates,
            handles.get(0));

    Path dstPath = temporaryFolder.newFolder().toPath();
    try (RocksDBStateDownloader rocksDBStateDownloader = new RocksDBStateDownloader(5)) {
        rocksDBStateDownloader.transferAllStateDataToDirectory(incrementalKeyedStateHandle, dstPath, new CloseableRegistry());
    }

    for (int i = 0; i < contentNum; ++i) {
        assertStateContentEqual(contents[i], dstPath.resolve(String.format("sharedState%d", i)));
    }
}