Java Code Examples for org.apache.flink.api.common.state.StateDescriptor#getType()
The following examples show how to use org.apache.flink.api.common.state.StateDescriptor#getType().
You can go to the original project or source file by following the links above each example.
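Before diving into the examples, here is a minimal sketch (not taken from any of the projects above) of what getType() reports for the common descriptor classes. The state names and value types below are invented for illustration and assume the Flink versions used in these examples.

import org.apache.flink.api.common.state.ListStateDescriptor;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.state.StateDescriptor;
import org.apache.flink.api.common.state.ValueStateDescriptor;

public class GetTypeSketch {

    public static void main(String[] args) {
        // Hypothetical descriptors; names and value types are arbitrary.
        StateDescriptor<?, ?> value = new ValueStateDescriptor<>("count", Long.class);
        StateDescriptor<?, ?> list = new ListStateDescriptor<>("events", String.class);
        StateDescriptor<?, ?> map = new MapStateDescriptor<>("lookup", String.class, Long.class);

        // getType() identifies the kind of state the descriptor creates.
        System.out.println(value.getName() + " -> " + value.getType()); // VALUE
        System.out.println(list.getName() + " -> " + list.getType());   // LIST
        System.out.println(map.getName() + " -> " + map.getType());     // MAP
    }
}

Each concrete descriptor class reports its own Type constant, which is what the backend code below compares when state is re-registered after a restore.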
Example 1
Source File: RegisteredKeyValueStateBackendMetaInfo.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
public void checkStateMetaInfo(StateDescriptor<?, ?> stateDesc) {

    Preconditions.checkState(
        Objects.equals(stateDesc.getName(), getName()),
        "Incompatible state names. " +
            "Was [" + getName() + "], " +
            "registered with [" + stateDesc.getName() + "].");

    if (stateDesc.getType() != StateDescriptor.Type.UNKNOWN
            && getStateType() != StateDescriptor.Type.UNKNOWN) {

        Preconditions.checkState(
            stateDesc.getType() == getStateType(),
            "Incompatible key/value state types. " +
                "Was [" + getStateType() + "], " +
                "registered with [" + stateDesc.getType() + "].");
    }
}
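The guard against StateDescriptor.Type.UNKNOWN means the type comparison is only enforced when both the registered meta info and the requested descriptor know their concrete state type (meta info restored from older savepoint formats may not). As a minimal, hypothetical illustration of the same comparison outside the backend (descriptor names and types are invented here), a mismatch surfaces as an IllegalStateException:

import org.apache.flink.api.common.state.ListStateDescriptor;
import org.apache.flink.api.common.state.StateDescriptor;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.util.Preconditions;

public class TypeCheckSketch {

    // Mirrors the comparison in checkStateMetaInfo: only enforce equality
    // when both sides carry a concrete state type.
    static void checkTypes(StateDescriptor<?, ?> registered, StateDescriptor<?, ?> requested) {
        if (registered.getType() != StateDescriptor.Type.UNKNOWN
                && requested.getType() != StateDescriptor.Type.UNKNOWN) {
            Preconditions.checkState(
                registered.getType() == requested.getType(),
                "Incompatible key/value state types. Was [" + registered.getType()
                    + "], registered with [" + requested.getType() + "].");
        }
    }

    public static void main(String[] args) {
        checkTypes(
            new ValueStateDescriptor<>("total", Long.class),
            new ListStateDescriptor<>("total", Long.class)); // throws IllegalStateException
    }
}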
Example 2
Source File: RegisteredKeyValueStateBackendMetaInfo.java From flink with Apache License 2.0 | 6 votes |
public void checkStateMetaInfo(StateDescriptor<?, ?> stateDesc) {

    Preconditions.checkState(
        Objects.equals(stateDesc.getName(), getName()),
        "Incompatible state names. " +
            "Was [" + getName() + "], " +
            "registered with [" + stateDesc.getName() + "].");

    if (stateDesc.getType() != StateDescriptor.Type.UNKNOWN
            && getStateType() != StateDescriptor.Type.UNKNOWN) {

        Preconditions.checkState(
            stateDesc.getType() == getStateType(),
            "Incompatible key/value state types. " +
                "Was [" + getStateType() + "], " +
                "registered with [" + stateDesc.getType() + "].");
    }
}
Example 3
Source File: RegisteredKeyValueStateBackendMetaInfo.java From flink with Apache License 2.0 | 6 votes |
public void checkStateMetaInfo(StateDescriptor<?, ?> stateDesc) {

    Preconditions.checkState(
        Objects.equals(stateDesc.getName(), getName()),
        "Incompatible state names. " +
            "Was [" + getName() + "], " +
            "registered with [" + stateDesc.getName() + "].");

    if (stateDesc.getType() != StateDescriptor.Type.UNKNOWN
            && getStateType() != StateDescriptor.Type.UNKNOWN) {

        Preconditions.checkState(
            stateDesc.getType() == getStateType(),
            "Incompatible key/value state types. " +
                "Was [" + getStateType() + "], " +
                "registered with [" + stateDesc.getType() + "].");
    }
}
Example 4
Source File: RocksDBKeyedStateBackend.java From Flink-CEPplus with Apache License 2.0 | 4 votes |
/**
 * Registers a k/v state information, which includes its state id, type, RocksDB column family handle, and serializers.
 *
 * <p>When restoring from a snapshot, we don't restore the individual k/v states, just the global RocksDB database and
 * the list of k/v state information. When a k/v state is first requested we check here whether we
 * already have a registered entry for that and return it (after some necessary state compatibility checks)
 * or create a new one if it does not exist.
 */
private <N, S extends State, SV, SEV> Tuple2<ColumnFamilyHandle, RegisteredKeyValueStateBackendMetaInfo<N, SV>> tryRegisterKvStateInformation(
    StateDescriptor<S, SV> stateDesc,
    TypeSerializer<N> namespaceSerializer,
    @Nonnull StateSnapshotTransformFactory<SEV> snapshotTransformFactory) throws Exception {

    RocksDbKvStateInfo oldStateInfo = kvStateInformation.get(stateDesc.getName());

    TypeSerializer<SV> stateSerializer = stateDesc.getSerializer();

    RocksDbKvStateInfo newRocksStateInfo;
    RegisteredKeyValueStateBackendMetaInfo<N, SV> newMetaInfo;

    if (oldStateInfo != null) {
        @SuppressWarnings("unchecked")
        RegisteredKeyValueStateBackendMetaInfo<N, SV> castedMetaInfo =
            (RegisteredKeyValueStateBackendMetaInfo<N, SV>) oldStateInfo.metaInfo;

        newMetaInfo = updateRestoredStateMetaInfo(
            Tuple2.of(oldStateInfo.columnFamilyHandle, castedMetaInfo),
            stateDesc,
            namespaceSerializer,
            stateSerializer);

        newRocksStateInfo = new RocksDbKvStateInfo(oldStateInfo.columnFamilyHandle, newMetaInfo);
        kvStateInformation.put(stateDesc.getName(), newRocksStateInfo);
    } else {
        newMetaInfo = new RegisteredKeyValueStateBackendMetaInfo<>(
            stateDesc.getType(),
            stateDesc.getName(),
            namespaceSerializer,
            stateSerializer,
            StateSnapshotTransformFactory.noTransform());

        newRocksStateInfo = RocksDBOperationUtils.createStateInfo(
            newMetaInfo, db, columnFamilyOptionsFactory, ttlCompactFiltersManager);
        RocksDBOperationUtils.registerKvStateInformation(this.kvStateInformation, this.nativeMetricMonitor,
            stateDesc.getName(), newRocksStateInfo);
    }

    StateSnapshotTransformFactory<SV> wrappedSnapshotTransformFactory = wrapStateSnapshotTransformFactory(
        stateDesc, snapshotTransformFactory, newMetaInfo.getStateSerializer());
    newMetaInfo.updateSnapshotTransformFactory(wrappedSnapshotTransformFactory);

    ttlCompactFiltersManager.configCompactFilter(stateDesc, newMetaInfo.getStateSerializer());

    return Tuple2.of(newRocksStateInfo.columnFamilyHandle, newMetaInfo);
}
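Note that getType() is only consulted on the first-registration path: when kvStateInformation has no entry for the state name, the descriptor's type and name are recorded in the new RegisteredKeyValueStateBackendMetaInfo before the RocksDB column family is created. On the restore path the existing column family handle is reused and the restored meta info is only updated and re-checked for compatibility.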
Example 5
Source File: RocksDBKeyedStateBackend.java From Flink-CEPplus with Apache License 2.0 | 4 votes |
/**
 * Migrate only the state value, that is the "value" that is stored in RocksDB. We don't migrate
 * the key here, which is made up of key group, key, namespace and map key
 * (in case of MapState).
 */
private <N, S extends State, SV> void migrateStateValues(
    StateDescriptor<S, SV> stateDesc,
    Tuple2<ColumnFamilyHandle, RegisteredKeyValueStateBackendMetaInfo<N, SV>> stateMetaInfo) throws Exception {

    if (stateDesc.getType() == StateDescriptor.Type.MAP) {
        throw new StateMigrationException("The new serializer for a MapState requires state migration in order for the job to proceed." +
            " However, migration for MapState currently isn't supported.");
    }

    LOG.info(
        "Performing state migration for state {} because the state serializer's schema, i.e. serialization format, has changed.",
        stateDesc);

    // we need to get an actual state instance because migration is different
    // for different state types. For example, ListState needs to deal with
    // individual elements
    StateFactory stateFactory = STATE_FACTORIES.get(stateDesc.getClass());
    if (stateFactory == null) {
        String message = String.format("State %s is not supported by %s",
            stateDesc.getClass(), this.getClass());
        throw new FlinkRuntimeException(message);
    }
    State state = stateFactory.createState(
        stateDesc,
        stateMetaInfo,
        RocksDBKeyedStateBackend.this);
    if (!(state instanceof AbstractRocksDBState)) {
        throw new FlinkRuntimeException(
            "State should be an AbstractRocksDBState but is " + state);
    }

    @SuppressWarnings("unchecked")
    AbstractRocksDBState<?, ?, SV> rocksDBState = (AbstractRocksDBState<?, ?, SV>) state;

    Snapshot rocksDBSnapshot = db.getSnapshot();
    try (
        RocksIteratorWrapper iterator = RocksDBOperationUtils.getRocksIterator(db, stateMetaInfo.f0);
        RocksDBWriteBatchWrapper batchWriter = new RocksDBWriteBatchWrapper(db, getWriteOptions())
    ) {
        iterator.seekToFirst();

        DataInputDeserializer serializedValueInput = new DataInputDeserializer();
        DataOutputSerializer migratedSerializedValueOutput = new DataOutputSerializer(512);
        while (iterator.isValid()) {
            serializedValueInput.setBuffer(iterator.value());

            rocksDBState.migrateSerializedValue(
                serializedValueInput,
                migratedSerializedValueOutput,
                stateMetaInfo.f1.getPreviousStateSerializer(),
                stateMetaInfo.f1.getStateSerializer());

            batchWriter.put(stateMetaInfo.f0, iterator.key(), migratedSerializedValueOutput.getCopyOfBuffer());

            migratedSerializedValueOutput.clear();
            iterator.next();
        }
    } finally {
        db.releaseSnapshot(rocksDBSnapshot);
        rocksDBSnapshot.close();
    }
}
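In this Flink-CEPplus version, getType() is used here simply to reject MapState migration outright. Compare Examples 8 and 11 below, where the same stateDesc.getType() == StateDescriptor.Type.MAP branch instead checks the map key serializer for compatibility and only allows value schema evolution.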
Example 6
Source File: HeapKeyedStateBackend.java From Flink-CEPplus with Apache License 2.0 | 4 votes |
private <N, V> StateTable<K, N, V> tryRegisterStateTable(
    TypeSerializer<N> namespaceSerializer,
    StateDescriptor<?, V> stateDesc,
    @Nonnull StateSnapshotTransformFactory<V> snapshotTransformFactory) throws StateMigrationException {

    @SuppressWarnings("unchecked")
    StateTable<K, N, V> stateTable = (StateTable<K, N, V>) registeredKVStates.get(stateDesc.getName());

    TypeSerializer<V> newStateSerializer = stateDesc.getSerializer();

    if (stateTable != null) {
        RegisteredKeyValueStateBackendMetaInfo<N, V> restoredKvMetaInfo = stateTable.getMetaInfo();

        restoredKvMetaInfo.updateSnapshotTransformFactory(snapshotTransformFactory);

        TypeSerializerSchemaCompatibility<N> namespaceCompatibility =
            restoredKvMetaInfo.updateNamespaceSerializer(namespaceSerializer);
        if (namespaceCompatibility.isCompatibleAfterMigration() || namespaceCompatibility.isIncompatible()) {
            throw new StateMigrationException("For heap backends, the new namespace serializer must be compatible.");
        }

        restoredKvMetaInfo.checkStateMetaInfo(stateDesc);

        TypeSerializerSchemaCompatibility<V> stateCompatibility =
            restoredKvMetaInfo.updateStateSerializer(newStateSerializer);

        if (stateCompatibility.isIncompatible()) {
            throw new StateMigrationException("For heap backends, the new state serializer must not be incompatible.");
        }

        stateTable.setMetaInfo(restoredKvMetaInfo);
    } else {
        RegisteredKeyValueStateBackendMetaInfo<N, V> newMetaInfo = new RegisteredKeyValueStateBackendMetaInfo<>(
            stateDesc.getType(),
            stateDesc.getName(),
            namespaceSerializer,
            newStateSerializer,
            snapshotTransformFactory);

        stateTable = snapshotStrategy.newStateTable(this, newMetaInfo);
        registeredKVStates.put(stateDesc.getName(), stateTable);
    }

    return stateTable;
}
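For context, none of these backend methods are called by user code directly; they run when a keyed operator asks the runtime for state described by a StateDescriptor. A hypothetical keyed function (class name, state name and types are invented for this sketch) showing that entry point:

import org.apache.flink.api.common.functions.RichFlatMapFunction;
import org.apache.flink.api.common.state.ValueState;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.util.Collector;

public class CountingFunction extends RichFlatMapFunction<String, Long> {

    private transient ValueState<Long> count;

    @Override
    public void open(Configuration parameters) {
        // The descriptor registered here eventually reaches
        // tryRegisterKvStateInformation / tryRegisterStateTable in the keyed state backend.
        ValueStateDescriptor<Long> descriptor = new ValueStateDescriptor<>("count", Long.class);
        count = getRuntimeContext().getState(descriptor);
    }

    @Override
    public void flatMap(String value, Collector<Long> out) throws Exception {
        Long current = count.value();
        long updated = (current == null ? 0L : current) + 1L;
        count.update(updated);
        out.collect(updated);
    }
}

The descriptor handed to getState() here is the stateDesc whose getName() and getType() end up in the registered meta info shown in the surrounding examples.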
Example 7
Source File: RocksDBKeyedStateBackend.java From flink with Apache License 2.0 | 4 votes |
/**
 * Registers a k/v state information, which includes its state id, type, RocksDB column family handle, and serializers.
 *
 * <p>When restoring from a snapshot, we don't restore the individual k/v states, just the global RocksDB database and
 * the list of k/v state information. When a k/v state is first requested we check here whether we
 * already have a registered entry for that and return it (after some necessary state compatibility checks)
 * or create a new one if it does not exist.
 */
private <N, S extends State, SV, SEV> Tuple2<ColumnFamilyHandle, RegisteredKeyValueStateBackendMetaInfo<N, SV>> tryRegisterKvStateInformation(
    StateDescriptor<S, SV> stateDesc,
    TypeSerializer<N> namespaceSerializer,
    @Nonnull StateSnapshotTransformFactory<SEV> snapshotTransformFactory) throws Exception {

    RocksDbKvStateInfo oldStateInfo = kvStateInformation.get(stateDesc.getName());

    TypeSerializer<SV> stateSerializer = stateDesc.getSerializer();

    RocksDbKvStateInfo newRocksStateInfo;
    RegisteredKeyValueStateBackendMetaInfo<N, SV> newMetaInfo;

    if (oldStateInfo != null) {
        @SuppressWarnings("unchecked")
        RegisteredKeyValueStateBackendMetaInfo<N, SV> castedMetaInfo =
            (RegisteredKeyValueStateBackendMetaInfo<N, SV>) oldStateInfo.metaInfo;

        newMetaInfo = updateRestoredStateMetaInfo(
            Tuple2.of(oldStateInfo.columnFamilyHandle, castedMetaInfo),
            stateDesc,
            namespaceSerializer,
            stateSerializer);

        newRocksStateInfo = new RocksDbKvStateInfo(oldStateInfo.columnFamilyHandle, newMetaInfo);
        kvStateInformation.put(stateDesc.getName(), newRocksStateInfo);
    } else {
        newMetaInfo = new RegisteredKeyValueStateBackendMetaInfo<>(
            stateDesc.getType(),
            stateDesc.getName(),
            namespaceSerializer,
            stateSerializer,
            StateSnapshotTransformFactory.noTransform());

        newRocksStateInfo = RocksDBOperationUtils.createStateInfo(
            newMetaInfo, db, columnFamilyOptionsFactory, ttlCompactFiltersManager);
        RocksDBOperationUtils.registerKvStateInformation(this.kvStateInformation, this.nativeMetricMonitor,
            stateDesc.getName(), newRocksStateInfo);
    }

    StateSnapshotTransformFactory<SV> wrappedSnapshotTransformFactory = wrapStateSnapshotTransformFactory(
        stateDesc, snapshotTransformFactory, newMetaInfo.getStateSerializer());
    newMetaInfo.updateSnapshotTransformFactory(wrappedSnapshotTransformFactory);

    ttlCompactFiltersManager.configCompactFilter(stateDesc, newMetaInfo.getStateSerializer());

    return Tuple2.of(newRocksStateInfo.columnFamilyHandle, newMetaInfo);
}
Example 8
Source File: RocksDBKeyedStateBackend.java From flink with Apache License 2.0 | 4 votes |
/**
 * Migrate only the state value, that is the "value" that is stored in RocksDB. We don't migrate
 * the key here, which is made up of key group, key, namespace and map key
 * (in case of MapState).
 */
@SuppressWarnings("unchecked")
private <N, S extends State, SV> void migrateStateValues(
    StateDescriptor<S, SV> stateDesc,
    Tuple2<ColumnFamilyHandle, RegisteredKeyValueStateBackendMetaInfo<N, SV>> stateMetaInfo) throws Exception {

    if (stateDesc.getType() == StateDescriptor.Type.MAP) {
        TypeSerializerSnapshot<SV> previousSerializerSnapshot = stateMetaInfo.f1.getPreviousStateSerializerSnapshot();
        checkState(previousSerializerSnapshot != null, "the previous serializer snapshot should exist.");
        checkState(previousSerializerSnapshot instanceof MapSerializerSnapshot, "previous serializer snapshot should be a MapSerializerSnapshot.");

        TypeSerializer<SV> newSerializer = stateMetaInfo.f1.getStateSerializer();
        checkState(newSerializer instanceof MapSerializer, "new serializer should be a MapSerializer.");

        MapSerializer<?, ?> mapSerializer = (MapSerializer<?, ?>) newSerializer;
        MapSerializerSnapshot<?, ?> mapSerializerSnapshot = (MapSerializerSnapshot<?, ?>) previousSerializerSnapshot;
        if (!checkMapStateKeySchemaCompatibility(mapSerializerSnapshot, mapSerializer)) {
            throw new StateMigrationException(
                "The new serializer for a MapState requires state migration in order for the job to proceed, since the key schema has changed. However, migration for MapState currently only allows value schema evolutions.");
        }
    }

    LOG.info(
        "Performing state migration for state {} because the state serializer's schema, i.e. serialization format, has changed.",
        stateDesc);

    // we need to get an actual state instance because migration is different
    // for different state types. For example, ListState needs to deal with
    // individual elements
    StateFactory stateFactory = STATE_FACTORIES.get(stateDesc.getClass());
    if (stateFactory == null) {
        String message = String.format("State %s is not supported by %s",
            stateDesc.getClass(), this.getClass());
        throw new FlinkRuntimeException(message);
    }
    State state = stateFactory.createState(
        stateDesc,
        stateMetaInfo,
        RocksDBKeyedStateBackend.this);
    if (!(state instanceof AbstractRocksDBState)) {
        throw new FlinkRuntimeException(
            "State should be an AbstractRocksDBState but is " + state);
    }

    @SuppressWarnings("unchecked")
    AbstractRocksDBState<?, ?, SV> rocksDBState = (AbstractRocksDBState<?, ?, SV>) state;

    Snapshot rocksDBSnapshot = db.getSnapshot();
    try (
        RocksIteratorWrapper iterator = RocksDBOperationUtils.getRocksIterator(db, stateMetaInfo.f0);
        RocksDBWriteBatchWrapper batchWriter = new RocksDBWriteBatchWrapper(db, getWriteOptions())
    ) {
        iterator.seekToFirst();

        DataInputDeserializer serializedValueInput = new DataInputDeserializer();
        DataOutputSerializer migratedSerializedValueOutput = new DataOutputSerializer(512);
        while (iterator.isValid()) {
            serializedValueInput.setBuffer(iterator.value());

            rocksDBState.migrateSerializedValue(
                serializedValueInput,
                migratedSerializedValueOutput,
                stateMetaInfo.f1.getPreviousStateSerializer(),
                stateMetaInfo.f1.getStateSerializer());

            batchWriter.put(stateMetaInfo.f0, iterator.key(), migratedSerializedValueOutput.getCopyOfBuffer());

            migratedSerializedValueOutput.clear();
            iterator.next();
        }
    } finally {
        db.releaseSnapshot(rocksDBSnapshot);
        rocksDBSnapshot.close();
    }
}
Example 9
Source File: HeapKeyedStateBackend.java From flink with Apache License 2.0 | 4 votes |
private <N, V> StateTable<K, N, V> tryRegisterStateTable(
    TypeSerializer<N> namespaceSerializer,
    StateDescriptor<?, V> stateDesc,
    @Nonnull StateSnapshotTransformFactory<V> snapshotTransformFactory) throws StateMigrationException {

    @SuppressWarnings("unchecked")
    StateTable<K, N, V> stateTable = (StateTable<K, N, V>) registeredKVStates.get(stateDesc.getName());

    TypeSerializer<V> newStateSerializer = stateDesc.getSerializer();

    if (stateTable != null) {
        RegisteredKeyValueStateBackendMetaInfo<N, V> restoredKvMetaInfo = stateTable.getMetaInfo();

        restoredKvMetaInfo.updateSnapshotTransformFactory(snapshotTransformFactory);

        TypeSerializerSchemaCompatibility<N> namespaceCompatibility =
            restoredKvMetaInfo.updateNamespaceSerializer(namespaceSerializer);
        if (namespaceCompatibility.isCompatibleAfterMigration() || namespaceCompatibility.isIncompatible()) {
            throw new StateMigrationException("For heap backends, the new namespace serializer must be compatible.");
        }

        restoredKvMetaInfo.checkStateMetaInfo(stateDesc);

        TypeSerializerSchemaCompatibility<V> stateCompatibility =
            restoredKvMetaInfo.updateStateSerializer(newStateSerializer);

        if (stateCompatibility.isIncompatible()) {
            throw new StateMigrationException("For heap backends, the new state serializer must not be incompatible.");
        }

        stateTable.setMetaInfo(restoredKvMetaInfo);
    } else {
        RegisteredKeyValueStateBackendMetaInfo<N, V> newMetaInfo = new RegisteredKeyValueStateBackendMetaInfo<>(
            stateDesc.getType(),
            stateDesc.getName(),
            namespaceSerializer,
            newStateSerializer,
            snapshotTransformFactory);

        stateTable = snapshotStrategy.newStateTable(keyContext, newMetaInfo, keySerializer);
        registeredKVStates.put(stateDesc.getName(), stateTable);
    }

    return stateTable;
}
Example 10
Source File: RocksDBKeyedStateBackend.java From flink with Apache License 2.0 | 4 votes |
/**
 * Registers a k/v state information, which includes its state id, type, RocksDB column family handle, and serializers.
 *
 * <p>When restoring from a snapshot, we don't restore the individual k/v states, just the global RocksDB database and
 * the list of k/v state information. When a k/v state is first requested we check here whether we
 * already have a registered entry for that and return it (after some necessary state compatibility checks)
 * or create a new one if it does not exist.
 */
private <N, S extends State, SV, SEV> Tuple2<ColumnFamilyHandle, RegisteredKeyValueStateBackendMetaInfo<N, SV>> tryRegisterKvStateInformation(
    StateDescriptor<S, SV> stateDesc,
    TypeSerializer<N> namespaceSerializer,
    @Nonnull StateSnapshotTransformFactory<SEV> snapshotTransformFactory) throws Exception {

    RocksDbKvStateInfo oldStateInfo = kvStateInformation.get(stateDesc.getName());

    TypeSerializer<SV> stateSerializer = stateDesc.getSerializer();

    RocksDbKvStateInfo newRocksStateInfo;
    RegisteredKeyValueStateBackendMetaInfo<N, SV> newMetaInfo;

    if (oldStateInfo != null) {
        @SuppressWarnings("unchecked")
        RegisteredKeyValueStateBackendMetaInfo<N, SV> castedMetaInfo =
            (RegisteredKeyValueStateBackendMetaInfo<N, SV>) oldStateInfo.metaInfo;

        newMetaInfo = updateRestoredStateMetaInfo(
            Tuple2.of(oldStateInfo.columnFamilyHandle, castedMetaInfo),
            stateDesc,
            namespaceSerializer,
            stateSerializer);

        newRocksStateInfo = new RocksDbKvStateInfo(oldStateInfo.columnFamilyHandle, newMetaInfo);
        kvStateInformation.put(stateDesc.getName(), newRocksStateInfo);
    } else {
        newMetaInfo = new RegisteredKeyValueStateBackendMetaInfo<>(
            stateDesc.getType(),
            stateDesc.getName(),
            namespaceSerializer,
            stateSerializer,
            StateSnapshotTransformFactory.noTransform());

        newRocksStateInfo = RocksDBOperationUtils.createStateInfo(
            newMetaInfo, db, columnFamilyOptionsFactory, ttlCompactFiltersManager);
        RocksDBOperationUtils.registerKvStateInformation(this.kvStateInformation, this.nativeMetricMonitor,
            stateDesc.getName(), newRocksStateInfo);
    }

    StateSnapshotTransformFactory<SV> wrappedSnapshotTransformFactory = wrapStateSnapshotTransformFactory(
        stateDesc, snapshotTransformFactory, newMetaInfo.getStateSerializer());
    newMetaInfo.updateSnapshotTransformFactory(wrappedSnapshotTransformFactory);

    ttlCompactFiltersManager.configCompactFilter(stateDesc, newMetaInfo.getStateSerializer());

    return Tuple2.of(newRocksStateInfo.columnFamilyHandle, newMetaInfo);
}
Example 11
Source File: RocksDBKeyedStateBackend.java From flink with Apache License 2.0 | 4 votes |
/**
 * Migrate only the state value, that is the "value" that is stored in RocksDB. We don't migrate
 * the key here, which is made up of key group, key, namespace and map key
 * (in case of MapState).
 */
@SuppressWarnings("unchecked")
private <N, S extends State, SV> void migrateStateValues(
    StateDescriptor<S, SV> stateDesc,
    Tuple2<ColumnFamilyHandle, RegisteredKeyValueStateBackendMetaInfo<N, SV>> stateMetaInfo) throws Exception {

    if (stateDesc.getType() == StateDescriptor.Type.MAP) {
        TypeSerializerSnapshot<SV> previousSerializerSnapshot = stateMetaInfo.f1.getPreviousStateSerializerSnapshot();
        checkState(previousSerializerSnapshot != null, "the previous serializer snapshot should exist.");
        checkState(previousSerializerSnapshot instanceof MapSerializerSnapshot, "previous serializer snapshot should be a MapSerializerSnapshot.");

        TypeSerializer<SV> newSerializer = stateMetaInfo.f1.getStateSerializer();
        checkState(newSerializer instanceof MapSerializer, "new serializer should be a MapSerializer.");

        MapSerializer<?, ?> mapSerializer = (MapSerializer<?, ?>) newSerializer;
        MapSerializerSnapshot<?, ?> mapSerializerSnapshot = (MapSerializerSnapshot<?, ?>) previousSerializerSnapshot;
        if (!checkMapStateKeySchemaCompatibility(mapSerializerSnapshot, mapSerializer)) {
            throw new StateMigrationException(
                "The new serializer for a MapState requires state migration in order for the job to proceed, since the key schema has changed. However, migration for MapState currently only allows value schema evolutions.");
        }
    }

    LOG.info(
        "Performing state migration for state {} because the state serializer's schema, i.e. serialization format, has changed.",
        stateDesc);

    // we need to get an actual state instance because migration is different
    // for different state types. For example, ListState needs to deal with
    // individual elements
    StateFactory stateFactory = STATE_FACTORIES.get(stateDesc.getClass());
    if (stateFactory == null) {
        String message = String.format("State %s is not supported by %s",
            stateDesc.getClass(), this.getClass());
        throw new FlinkRuntimeException(message);
    }
    State state = stateFactory.createState(
        stateDesc,
        stateMetaInfo,
        RocksDBKeyedStateBackend.this);
    if (!(state instanceof AbstractRocksDBState)) {
        throw new FlinkRuntimeException(
            "State should be an AbstractRocksDBState but is " + state);
    }

    @SuppressWarnings("unchecked")
    AbstractRocksDBState<?, ?, SV> rocksDBState = (AbstractRocksDBState<?, ?, SV>) state;

    Snapshot rocksDBSnapshot = db.getSnapshot();
    try (
        RocksIteratorWrapper iterator = RocksDBOperationUtils.getRocksIterator(db, stateMetaInfo.f0, readOptions);
        RocksDBWriteBatchWrapper batchWriter = new RocksDBWriteBatchWrapper(db, getWriteOptions(), getWriteBatchSize())
    ) {
        iterator.seekToFirst();

        DataInputDeserializer serializedValueInput = new DataInputDeserializer();
        DataOutputSerializer migratedSerializedValueOutput = new DataOutputSerializer(512);
        while (iterator.isValid()) {
            serializedValueInput.setBuffer(iterator.value());

            rocksDBState.migrateSerializedValue(
                serializedValueInput,
                migratedSerializedValueOutput,
                stateMetaInfo.f1.getPreviousStateSerializer(),
                stateMetaInfo.f1.getStateSerializer());

            batchWriter.put(stateMetaInfo.f0, iterator.key(), migratedSerializedValueOutput.getCopyOfBuffer());

            migratedSerializedValueOutput.clear();
            iterator.next();
        }
    } finally {
        db.releaseSnapshot(rocksDBSnapshot);
        rocksDBSnapshot.close();
    }
}
Example 12
Source File: HeapKeyedStateBackend.java From flink with Apache License 2.0 | 4 votes |
private <N, V> StateTable<K, N, V> tryRegisterStateTable(
    TypeSerializer<N> namespaceSerializer,
    StateDescriptor<?, V> stateDesc,
    @Nonnull StateSnapshotTransformFactory<V> snapshotTransformFactory) throws StateMigrationException {

    @SuppressWarnings("unchecked")
    StateTable<K, N, V> stateTable = (StateTable<K, N, V>) registeredKVStates.get(stateDesc.getName());

    TypeSerializer<V> newStateSerializer = stateDesc.getSerializer();

    if (stateTable != null) {
        RegisteredKeyValueStateBackendMetaInfo<N, V> restoredKvMetaInfo = stateTable.getMetaInfo();

        restoredKvMetaInfo.updateSnapshotTransformFactory(snapshotTransformFactory);

        TypeSerializerSchemaCompatibility<N> namespaceCompatibility =
            restoredKvMetaInfo.updateNamespaceSerializer(namespaceSerializer);
        if (namespaceCompatibility.isCompatibleAfterMigration() || namespaceCompatibility.isIncompatible()) {
            throw new StateMigrationException("For heap backends, the new namespace serializer must be compatible.");
        }

        restoredKvMetaInfo.checkStateMetaInfo(stateDesc);

        TypeSerializerSchemaCompatibility<V> stateCompatibility =
            restoredKvMetaInfo.updateStateSerializer(newStateSerializer);

        if (stateCompatibility.isIncompatible()) {
            throw new StateMigrationException("For heap backends, the new state serializer must not be incompatible.");
        }

        stateTable.setMetaInfo(restoredKvMetaInfo);
    } else {
        RegisteredKeyValueStateBackendMetaInfo<N, V> newMetaInfo = new RegisteredKeyValueStateBackendMetaInfo<>(
            stateDesc.getType(),
            stateDesc.getName(),
            namespaceSerializer,
            newStateSerializer,
            snapshotTransformFactory);

        stateTable = snapshotStrategy.newStateTable(keyContext, newMetaInfo, keySerializer);
        registeredKVStates.put(stateDesc.getName(), stateTable);
    }

    return stateTable;
}