org.apache.flink.api.common.typeutils.base.array.BytePrimitiveArraySerializer Java Examples
The following examples show how to use
org.apache.flink.api.common.typeutils.base.array.BytePrimitiveArraySerializer.
They are collected from open-source projects; the source file, originating project, and license are noted above each example.
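As a quick orientation before the examples, the sketch below round-trips a byte[] through the serializer. It is a minimal illustration, not taken from any of the projects listed here; it assumes Flink's in-memory DataOutputSerializer and DataInputDeserializer as the DataOutputView/DataInputView implementations, and the class name ByteArrayRoundTrip is made up for the example.

import java.util.Arrays;

import org.apache.flink.api.common.typeutils.base.array.BytePrimitiveArraySerializer;
import org.apache.flink.core.memory.DataInputDeserializer;
import org.apache.flink.core.memory.DataOutputSerializer;

public class ByteArrayRoundTrip {

    public static void main(String[] args) throws Exception {
        byte[] original = {1, 2, 3, 4};

        // Write the array into an in-memory DataOutputView.
        // The serializer writes the length first, then the raw bytes.
        DataOutputSerializer out = new DataOutputSerializer(64);
        BytePrimitiveArraySerializer.INSTANCE.serialize(original, out);

        // Read it back through a DataInputView over the written bytes.
        DataInputDeserializer in = new DataInputDeserializer(out.getCopyOfBuffer());
        byte[] copy = BytePrimitiveArraySerializer.INSTANCE.deserialize(in);

        System.out.println(Arrays.equals(original, copy)); // prints: true
    }
}

The serializer is stateless, which is why most examples below share the singleton BytePrimitiveArraySerializer.INSTANCE; the ones that call new BytePrimitiveArraySerializer() exercise the same behavior through a fresh instance.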
Example #1
Source File: TestCoordinationRequestHandler.java From flink with Apache License 2.0
private void buildAccumulatorResults() {
    List<byte[]> finalResults = CollectTestUtils.toBytesList(buffered, serializer);
    SerializedListAccumulator<byte[]> listAccumulator = new SerializedListAccumulator<>();
    try {
        byte[] serializedResult =
            CollectSinkFunction.serializeAccumulatorResult(offset, version, checkpointedOffset, finalResults);
        listAccumulator.add(serializedResult, BytePrimitiveArraySerializer.INSTANCE);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }

    accumulatorResults.put(accumulatorName, OptionalFailure.of(listAccumulator.getLocalValue()));
}
Example #2
Source File: HashTableTest.java From Flink-CEPplus with Apache License 2.0
public HashTableTest() {
    TypeSerializer<?>[] fieldSerializers = { LongSerializer.INSTANCE, BytePrimitiveArraySerializer.INSTANCE };
    @SuppressWarnings("unchecked")
    Class<Tuple2<Long, byte[]>> clazz = (Class<Tuple2<Long, byte[]>>) (Class<?>) Tuple2.class;
    this.buildSerializer = new TupleSerializer<Tuple2<Long, byte[]>>(clazz, fieldSerializers);

    this.probeSerializer = LongSerializer.INSTANCE;

    TypeComparator<?>[] comparators = { new LongComparator(true) };
    TypeSerializer<?>[] comparatorSerializers = { LongSerializer.INSTANCE };

    this.buildComparator = new TupleComparator<Tuple2<Long, byte[]>>(new int[] {0}, comparators, comparatorSerializers);

    this.probeComparator = new LongComparator(true);

    this.pairComparator = new TypePairComparator<Long, Tuple2<Long, byte[]>>() {

        private long ref;

        @Override
        public void setReference(Long reference) {
            ref = reference;
        }

        @Override
        public boolean equalToReference(Tuple2<Long, byte[]> candidate) {
            //noinspection UnnecessaryUnboxing
            return candidate.f0.longValue() == ref;
        }

        @Override
        public int compareToReference(Tuple2<Long, byte[]> candidate) {
            long x = ref;
            long y = candidate.f0;
            return (x < y) ? -1 : ((x == y) ? 0 : 1);
        }
    };
}
Example #3
Source File: CollectSinkFunctionTest.java From flink with Apache License 2.0
@SuppressWarnings("unchecked")
private Tuple2<Long, CollectCoordinationResponse> getAccumualtorResults() throws Exception {
    Accumulator accumulator = runtimeContext.getAccumulator(ACCUMULATOR_NAME);
    ArrayList<byte[]> accLocalValue = ((SerializedListAccumulator) accumulator).getLocalValue();
    List<byte[]> serializedResults =
        SerializedListAccumulator.deserializeList(accLocalValue, BytePrimitiveArraySerializer.INSTANCE);

    Assert.assertEquals(1, serializedResults.size());
    byte[] serializedResult = serializedResults.get(0);
    return CollectSinkFunction.deserializeAccumulatorResult(serializedResult);
}
Example #4
Source File: CollectSinkFunction.java From flink with Apache License 2.0
public void accumulateFinalResults() throws Exception {
    bufferLock.lock();
    try {
        // put results not consumed by the client into the accumulator
        // so that we do not block the closing procedure while not throwing results away
        SerializedListAccumulator<byte[]> accumulator = new SerializedListAccumulator<>();
        accumulator.add(
            serializeAccumulatorResult(offset, version, lastCheckpointedOffset, buffer),
            BytePrimitiveArraySerializer.INSTANCE);
        getRuntimeContext().addAccumulator(accumulatorName, accumulator);
    } finally {
        bufferLock.unlock();
    }
}
Example #5
Source File: AbstractSiddhiOperator.java From bahir-flink with Apache License 2.0
@Override
public void initializeState(StateInitializationContext context) throws Exception {
    super.initializeState(context);
    if (siddhiRuntimeState == null) {
        siddhiRuntimeState = context.getOperatorStateStore().getUnionListState(
            new ListStateDescriptor<>(SIDDHI_RUNTIME_STATE_NAME, new BytePrimitiveArraySerializer()));
    }
    if (queuedRecordsState == null) {
        queuedRecordsState = context.getOperatorStateStore().getListState(
            new ListStateDescriptor<>(QUEUED_RECORDS_STATE_NAME, new BytePrimitiveArraySerializer()));
    }
    if (context.isRestored()) {
        restoreState();
    }
}
Example #6
Source File: HashTableTest.java From flink with Apache License 2.0
public HashTableTest() {
    TypeSerializer<?>[] fieldSerializers = { LongSerializer.INSTANCE, BytePrimitiveArraySerializer.INSTANCE };
    @SuppressWarnings("unchecked")
    Class<Tuple2<Long, byte[]>> clazz = (Class<Tuple2<Long, byte[]>>) (Class<?>) Tuple2.class;
    this.buildSerializer = new TupleSerializer<Tuple2<Long, byte[]>>(clazz, fieldSerializers);

    this.probeSerializer = LongSerializer.INSTANCE;

    TypeComparator<?>[] comparators = { new LongComparator(true) };
    TypeSerializer<?>[] comparatorSerializers = { LongSerializer.INSTANCE };

    this.buildComparator = new TupleComparator<Tuple2<Long, byte[]>>(new int[] {0}, comparators, comparatorSerializers);

    this.probeComparator = new LongComparator(true);

    this.pairComparator = new TypePairComparator<Long, Tuple2<Long, byte[]>>() {

        private long ref;

        @Override
        public void setReference(Long reference) {
            ref = reference;
        }

        @Override
        public boolean equalToReference(Tuple2<Long, byte[]> candidate) {
            //noinspection UnnecessaryUnboxing
            return candidate.f0.longValue() == ref;
        }

        @Override
        public int compareToReference(Tuple2<Long, byte[]> candidate) {
            long x = ref;
            long y = candidate.f0;
            return (x < y) ? -1 : ((x == y) ? 0 : 1);
        }
    };
}
Example #7
Source File: AbstractSiddhiOperator.java From flink-siddhi with Apache License 2.0
@Override
public void initializeState(StateInitializationContext context) throws Exception {
    super.initializeState(context);
    if (siddhiRuntimeState == null) {
        siddhiRuntimeState = context.getOperatorStateStore().getUnionListState(
            new ListStateDescriptor<>(SIDDHI_RUNTIME_STATE_NAME, new BytePrimitiveArraySerializer()));
    }
    if (queuedRecordsState == null) {
        queuedRecordsState = context.getOperatorStateStore().getListState(
            new ListStateDescriptor<>(QUEUED_RECORDS_STATE_NAME, new BytePrimitiveArraySerializer()));
    }
    if (context.isRestored()) {
        restoreState();
    }
}
Example #8
Source File: RocksDBSavepointIterator.java From bravo with Apache License 2.0
private final KeyedStateRow nextRecord(KeyedStateRow reuse) throws Exception {
    if (!openIfNeeded()) {
        return null;
    }

    byte[] key = BytePrimitiveArraySerializer.INSTANCE.deserialize(compressedInputView);
    byte[] value = BytePrimitiveArraySerializer.INSTANCE.deserialize(compressedInputView);

    reuse.f0 = stateName;
    reuse.f1 = key;
    reuse.f2 = value;

    if (hasMetaDataFollowsFlag(reuse.f1)) {
        clearMetaDataFollowsFlag(reuse.f1);
        seekNextStateId(true);
        while (stateId == END_OF_KEY_GROUP_MARK && hasNext) {
            hasNext = seekNextOffset();
            if (hasNext) {
                seekNextStateId(false);
            }
        }
    }

    LOGGER.trace("{}", reuse);
    return reuse;
}
Example #9
Source File: RocksFullSnapshotStrategy.java From flink with Apache License 2.0
private void writeKeyValuePair(byte[] key, byte[] value, DataOutputView out) throws IOException {
    BytePrimitiveArraySerializer.INSTANCE.serialize(key, out);
    BytePrimitiveArraySerializer.INSTANCE.serialize(value, out);
}
Example #10
Source File: PythonTypeUtils.java From flink with Apache License 2.0
@Override
public TypeSerializer visit(BinaryType binaryType) {
    return BytePrimitiveArraySerializer.INSTANCE;
}
Example #11
Source File: PythonTypeUtils.java From flink with Apache License 2.0
@Override
public TypeSerializer visit(VarBinaryType varBinaryType) {
    return BytePrimitiveArraySerializer.INSTANCE;
}
Example #12
Source File: BytePrimitiveArraySerializerTest.java From flink with Apache License 2.0
@Override
protected TypeSerializer<byte[]> createSerializer() {
    return new BytePrimitiveArraySerializer();
}
Example #13
Source File: RocksDBFullRestoreOperation.java From flink with Apache License 2.0
/**
 * Restore the KV-state / ColumnFamily data for all key-groups referenced by the current state handle.
 */
private void restoreKVStateData() throws IOException, RocksDBException {
    //for all key-groups in the current state handle...
    try (RocksDBWriteBatchWrapper writeBatchWrapper = new RocksDBWriteBatchWrapper(db, writeBatchSize)) {
        for (Tuple2<Integer, Long> keyGroupOffset : currentKeyGroupsStateHandle.getGroupRangeOffsets()) {
            int keyGroup = keyGroupOffset.f0;

            // Check that restored key groups all belong to the backend
            Preconditions.checkState(keyGroupRange.contains(keyGroup), "The key group must belong to the backend");

            long offset = keyGroupOffset.f1;
            //not empty key-group?
            if (0L != offset) {
                currentStateHandleInStream.seek(offset);
                try (InputStream compressedKgIn = keygroupStreamCompressionDecorator.decorateWithCompression(currentStateHandleInStream)) {
                    DataInputViewStreamWrapper compressedKgInputView = new DataInputViewStreamWrapper(compressedKgIn);
                    //TODO this could be aware of keyGroupPrefixBytes and write only one byte if possible
                    int kvStateId = compressedKgInputView.readShort();
                    ColumnFamilyHandle handle = currentStateHandleKVStateColumnFamilies.get(kvStateId);
                    //insert all k/v pairs into DB
                    boolean keyGroupHasMoreKeys = true;
                    while (keyGroupHasMoreKeys) {
                        byte[] key = BytePrimitiveArraySerializer.INSTANCE.deserialize(compressedKgInputView);
                        byte[] value = BytePrimitiveArraySerializer.INSTANCE.deserialize(compressedKgInputView);
                        if (hasMetaDataFollowsFlag(key)) {
                            //clear the signal bit in the key to make it ready for insertion again
                            clearMetaDataFollowsFlag(key);
                            writeBatchWrapper.put(handle, key, value);
                            //TODO this could be aware of keyGroupPrefixBytes and write only one byte if possible
                            kvStateId = END_OF_KEY_GROUP_MARK & compressedKgInputView.readShort();
                            if (END_OF_KEY_GROUP_MARK == kvStateId) {
                                keyGroupHasMoreKeys = false;
                            } else {
                                handle = currentStateHandleKVStateColumnFamilies.get(kvStateId);
                            }
                        } else {
                            writeBatchWrapper.put(handle, key, value);
                        }
                    }
                }
            }
        }
    }
}
Example #14
Source File: KvStateServerHandlerTest.java From flink with Apache License 2.0
/**
 * Tests that large responses are chunked.
 */
@Test
public void testChunkedResponse() throws Exception {
    KvStateRegistry registry = new KvStateRegistry();
    KvStateRequestStats stats = new AtomicKvStateRequestStats();

    MessageSerializer<KvStateInternalRequest, KvStateResponse> serializer =
        new MessageSerializer<>(new KvStateInternalRequest.KvStateInternalRequestDeserializer(), new KvStateResponse.KvStateResponseDeserializer());

    KvStateServerHandler handler = new KvStateServerHandler(testServer, registry, serializer, stats);
    EmbeddedChannel channel = new EmbeddedChannel(getFrameDecoder(), handler);

    int numKeyGroups = 1;
    AbstractStateBackend abstractBackend = new MemoryStateBackend();
    DummyEnvironment dummyEnv = new DummyEnvironment("test", 1, 0);
    dummyEnv.setKvStateRegistry(registry);
    AbstractKeyedStateBackend<Integer> backend = createKeyedStateBackend(registry, numKeyGroups, abstractBackend, dummyEnv);

    final TestRegistryListener registryListener = new TestRegistryListener();
    registry.registerListener(dummyEnv.getJobID(), registryListener);

    // Register state
    ValueStateDescriptor<byte[]> desc = new ValueStateDescriptor<>("any", BytePrimitiveArraySerializer.INSTANCE);
    desc.setQueryable("vanilla");

    ValueState<byte[]> state = backend.getPartitionedState(
        VoidNamespace.INSTANCE,
        VoidNamespaceSerializer.INSTANCE,
        desc);

    // Update KvState
    byte[] bytes = new byte[2 * channel.config().getWriteBufferHighWaterMark()];

    byte current = 0;
    for (int i = 0; i < bytes.length; i++) {
        bytes[i] = current++;
    }

    int key = 99812822;
    backend.setCurrentKey(key);
    state.update(bytes);

    // Request
    byte[] serializedKeyAndNamespace = KvStateSerializer.serializeKeyAndNamespace(
        key,
        IntSerializer.INSTANCE,
        VoidNamespace.INSTANCE,
        VoidNamespaceSerializer.INSTANCE);

    long requestId = Integer.MAX_VALUE + 182828L;

    assertTrue(registryListener.registrationName.equals("vanilla"));

    KvStateInternalRequest request = new KvStateInternalRequest(registryListener.kvStateId, serializedKeyAndNamespace);
    ByteBuf serRequest = MessageSerializer.serializeRequest(channel.alloc(), requestId, request);

    // Write the request and wait for the response
    channel.writeInbound(serRequest);

    Object msg = readInboundBlocking(channel);
    assertTrue("Not ChunkedByteBuf", msg instanceof ChunkedByteBuf);
    ((ChunkedByteBuf) msg).close();
}
Example #15
Source File: HashTableTest.java From flink with Apache License 2.0
/**
 * Tests that the MutableHashTable spills its partitions when creating the initial table
 * without overflow segments in the partitions. This means that the records are large.
 */
@Test
public void testSpillingWhenBuildingTableWithoutOverflow() throws Exception {
    try (final IOManager ioMan = new IOManagerAsync()) {
        final TypeSerializer<byte[]> serializer = BytePrimitiveArraySerializer.INSTANCE;
        final TypeComparator<byte[]> buildComparator = new BytePrimitiveArrayComparator(true);
        final TypeComparator<byte[]> probeComparator = new BytePrimitiveArrayComparator(true);

        @SuppressWarnings("unchecked")
        final TypePairComparator<byte[], byte[]> pairComparator = new GenericPairComparator<>(
            new BytePrimitiveArrayComparator(true), new BytePrimitiveArrayComparator(true));

        final int pageSize = 128;
        final int numSegments = 33;

        List<MemorySegment> memory = getMemory(numSegments, pageSize);

        MutableHashTable<byte[], byte[]> table = new MutableHashTable<byte[], byte[]>(
            serializer,
            serializer,
            buildComparator,
            probeComparator,
            pairComparator,
            memory,
            ioMan,
            1,
            false);

        int numElements = 9;

        table.open(
            new CombiningIterator<byte[]>(
                new ByteArrayIterator(numElements, 128, (byte) 0),
                new ByteArrayIterator(numElements, 128, (byte) 1)),
            new CombiningIterator<byte[]>(
                new ByteArrayIterator(1, 128, (byte) 0),
                new ByteArrayIterator(1, 128, (byte) 1)));

        while (table.nextRecord()) {
            MutableObjectIterator<byte[]> iterator = table.getBuildSideIterator();

            int counter = 0;

            while (iterator.next() != null) {
                counter++;
            }

            // check that we retrieve all our elements
            Assert.assertEquals(numElements, counter);
        }

        table.close();
    }
}
Example #16
Source File: BytePrimitiveArraySerializerTest.java From Flink-CEPplus with Apache License 2.0
@Override
protected TypeSerializer<byte[]> createSerializer() {
    return new BytePrimitiveArraySerializer();
}
Example #17
Source File: InternalSerializers.java From flink with Apache License 2.0
/**
 * Creates a {@link TypeSerializer} for internal data structures of the given {@link LogicalType}.
 */
public static TypeSerializer create(LogicalType type, ExecutionConfig config) {
    // ordered by type root definition
    switch (type.getTypeRoot()) {
        case CHAR:
        case VARCHAR:
            return StringDataSerializer.INSTANCE;
        case BOOLEAN:
            return BooleanSerializer.INSTANCE;
        case BINARY:
        case VARBINARY:
            return BytePrimitiveArraySerializer.INSTANCE;
        case DECIMAL:
            return new DecimalDataSerializer(getPrecision(type), getScale(type));
        case TINYINT:
            return ByteSerializer.INSTANCE;
        case SMALLINT:
            return ShortSerializer.INSTANCE;
        case INTEGER:
        case DATE:
        case TIME_WITHOUT_TIME_ZONE:
        case INTERVAL_YEAR_MONTH:
            return IntSerializer.INSTANCE;
        case BIGINT:
        case INTERVAL_DAY_TIME:
            return LongSerializer.INSTANCE;
        case FLOAT:
            return FloatSerializer.INSTANCE;
        case DOUBLE:
            return DoubleSerializer.INSTANCE;
        case TIMESTAMP_WITHOUT_TIME_ZONE:
        case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
            return new TimestampDataSerializer(getPrecision(type));
        case TIMESTAMP_WITH_TIME_ZONE:
            throw new UnsupportedOperationException();
        case ARRAY:
            return new ArrayDataSerializer(((ArrayType) type).getElementType(), config);
        case MULTISET:
            return new MapDataSerializer(((MultisetType) type).getElementType(), new IntType(false), config);
        case MAP:
            MapType mapType = (MapType) type;
            return new MapDataSerializer(mapType.getKeyType(), mapType.getValueType(), config);
        case ROW:
        case STRUCTURED_TYPE:
            return new RowDataSerializer(config, type.getChildren().toArray(new LogicalType[0]));
        case DISTINCT_TYPE:
            return create(((DistinctType) type).getSourceType(), config);
        case RAW:
            if (type instanceof RawType) {
                final RawType<?> rawType = (RawType<?>) type;
                return new RawValueDataSerializer<>(rawType.getTypeSerializer());
            }
            return new RawValueDataSerializer<>(
                ((TypeInformationRawType<?>) type).getTypeInformation().createSerializer(config));
        case NULL:
        case SYMBOL:
        case UNRESOLVED:
        default:
            throw new UnsupportedOperationException(
                "Unsupported type '" + type + "' to get internal serializer");
    }
}
Example #18
Source File: KvStateServerHandlerTest.java From flink with Apache License 2.0
/**
 * Tests that large responses are chunked.
 */
@Test
public void testChunkedResponse() throws Exception {
    KvStateRegistry registry = new KvStateRegistry();
    KvStateRequestStats stats = new AtomicKvStateRequestStats();

    MessageSerializer<KvStateInternalRequest, KvStateResponse> serializer =
        new MessageSerializer<>(new KvStateInternalRequest.KvStateInternalRequestDeserializer(), new KvStateResponse.KvStateResponseDeserializer());

    KvStateServerHandler handler = new KvStateServerHandler(testServer, registry, serializer, stats);
    EmbeddedChannel channel = new EmbeddedChannel(getFrameDecoder(), handler);

    int numKeyGroups = 1;
    AbstractStateBackend abstractBackend = new MemoryStateBackend();
    DummyEnvironment dummyEnv = new DummyEnvironment("test", 1, 0);
    dummyEnv.setKvStateRegistry(registry);
    AbstractKeyedStateBackend<Integer> backend = createKeyedStateBackend(registry, numKeyGroups, abstractBackend, dummyEnv);

    final TestRegistryListener registryListener = new TestRegistryListener();
    registry.registerListener(dummyEnv.getJobID(), registryListener);

    // Register state
    ValueStateDescriptor<byte[]> desc = new ValueStateDescriptor<>("any", BytePrimitiveArraySerializer.INSTANCE);
    desc.setQueryable("vanilla");

    ValueState<byte[]> state = backend.getPartitionedState(
        VoidNamespace.INSTANCE,
        VoidNamespaceSerializer.INSTANCE,
        desc);

    // Update KvState
    byte[] bytes = new byte[2 * channel.config().getWriteBufferHighWaterMark()];

    byte current = 0;
    for (int i = 0; i < bytes.length; i++) {
        bytes[i] = current++;
    }

    int key = 99812822;
    backend.setCurrentKey(key);
    state.update(bytes);

    // Request
    byte[] serializedKeyAndNamespace = KvStateSerializer.serializeKeyAndNamespace(
        key,
        IntSerializer.INSTANCE,
        VoidNamespace.INSTANCE,
        VoidNamespaceSerializer.INSTANCE);

    long requestId = Integer.MAX_VALUE + 182828L;

    assertTrue(registryListener.registrationName.equals("vanilla"));

    KvStateInternalRequest request = new KvStateInternalRequest(registryListener.kvStateId, serializedKeyAndNamespace);
    ByteBuf serRequest = MessageSerializer.serializeRequest(channel.alloc(), requestId, request);

    // Write the request and wait for the response
    channel.writeInbound(serRequest);

    Object msg = readInboundBlocking(channel);
    assertTrue("Not ChunkedByteBuf", msg instanceof ChunkedByteBuf);
}
Example #19
Source File: RocksDBFullRestoreOperation.java From flink with Apache License 2.0
/**
 * Restore the KV-state / ColumnFamily data for all key-groups referenced by the current state handle.
 */
private void restoreKVStateData() throws IOException, RocksDBException {
    //for all key-groups in the current state handle...
    try (RocksDBWriteBatchWrapper writeBatchWrapper = new RocksDBWriteBatchWrapper(db)) {
        for (Tuple2<Integer, Long> keyGroupOffset : currentKeyGroupsStateHandle.getGroupRangeOffsets()) {
            int keyGroup = keyGroupOffset.f0;

            // Check that restored key groups all belong to the backend
            Preconditions.checkState(keyGroupRange.contains(keyGroup), "The key group must belong to the backend");

            long offset = keyGroupOffset.f1;
            //not empty key-group?
            if (0L != offset) {
                currentStateHandleInStream.seek(offset);
                try (InputStream compressedKgIn = keygroupStreamCompressionDecorator.decorateWithCompression(currentStateHandleInStream)) {
                    DataInputViewStreamWrapper compressedKgInputView = new DataInputViewStreamWrapper(compressedKgIn);
                    //TODO this could be aware of keyGroupPrefixBytes and write only one byte if possible
                    int kvStateId = compressedKgInputView.readShort();
                    ColumnFamilyHandle handle = currentStateHandleKVStateColumnFamilies.get(kvStateId);
                    //insert all k/v pairs into DB
                    boolean keyGroupHasMoreKeys = true;
                    while (keyGroupHasMoreKeys) {
                        byte[] key = BytePrimitiveArraySerializer.INSTANCE.deserialize(compressedKgInputView);
                        byte[] value = BytePrimitiveArraySerializer.INSTANCE.deserialize(compressedKgInputView);
                        if (hasMetaDataFollowsFlag(key)) {
                            //clear the signal bit in the key to make it ready for insertion again
                            clearMetaDataFollowsFlag(key);
                            writeBatchWrapper.put(handle, key, value);
                            //TODO this could be aware of keyGroupPrefixBytes and write only one byte if possible
                            kvStateId = END_OF_KEY_GROUP_MARK & compressedKgInputView.readShort();
                            if (END_OF_KEY_GROUP_MARK == kvStateId) {
                                keyGroupHasMoreKeys = false;
                            } else {
                                handle = currentStateHandleKVStateColumnFamilies.get(kvStateId);
                            }
                        } else {
                            writeBatchWrapper.put(handle, key, value);
                        }
                    }
                }
            }
        }
    }
}
Example #20
Source File: InternalSerializers.java From flink with Apache License 2.0
public static TypeSerializer create(LogicalType type, ExecutionConfig config) {
    switch (type.getTypeRoot()) {
        case BOOLEAN:
            return BooleanSerializer.INSTANCE;
        case TINYINT:
            return ByteSerializer.INSTANCE;
        case SMALLINT:
            return ShortSerializer.INSTANCE;
        case INTEGER:
        case DATE:
        case TIME_WITHOUT_TIME_ZONE:
        case INTERVAL_YEAR_MONTH:
            return IntSerializer.INSTANCE;
        case BIGINT:
        case TIMESTAMP_WITHOUT_TIME_ZONE:
        case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
        case INTERVAL_DAY_TIME:
            return LongSerializer.INSTANCE;
        case FLOAT:
            return FloatSerializer.INSTANCE;
        case DOUBLE:
            return DoubleSerializer.INSTANCE;
        case CHAR:
        case VARCHAR:
            return BinaryStringSerializer.INSTANCE;
        case DECIMAL:
            DecimalType decimalType = (DecimalType) type;
            return new DecimalSerializer(decimalType.getPrecision(), decimalType.getScale());
        case ARRAY:
            return new BaseArraySerializer(((ArrayType) type).getElementType(), config);
        case MAP:
            MapType mapType = (MapType) type;
            return new BaseMapSerializer(mapType.getKeyType(), mapType.getValueType(), config);
        case MULTISET:
            return new BaseMapSerializer(((MultisetType) type).getElementType(), new IntType(), config);
        case ROW:
            RowType rowType = (RowType) type;
            return new BaseRowSerializer(config, rowType);
        case BINARY:
        case VARBINARY:
            return BytePrimitiveArraySerializer.INSTANCE;
        case ANY:
            return new BinaryGenericSerializer(
                ((TypeInformationAnyType) type).getTypeInformation().createSerializer(config));
        default:
            throw new RuntimeException("Not support type: " + type);
    }
}
Example #21
Source File: KvStateServerHandlerTest.java From Flink-CEPplus with Apache License 2.0
/**
 * Tests that large responses are chunked.
 */
@Test
public void testChunkedResponse() throws Exception {
    KvStateRegistry registry = new KvStateRegistry();
    KvStateRequestStats stats = new AtomicKvStateRequestStats();

    MessageSerializer<KvStateInternalRequest, KvStateResponse> serializer =
        new MessageSerializer<>(new KvStateInternalRequest.KvStateInternalRequestDeserializer(), new KvStateResponse.KvStateResponseDeserializer());

    KvStateServerHandler handler = new KvStateServerHandler(testServer, registry, serializer, stats);
    EmbeddedChannel channel = new EmbeddedChannel(getFrameDecoder(), handler);

    int numKeyGroups = 1;
    AbstractStateBackend abstractBackend = new MemoryStateBackend();
    DummyEnvironment dummyEnv = new DummyEnvironment("test", 1, 0);
    dummyEnv.setKvStateRegistry(registry);
    AbstractKeyedStateBackend<Integer> backend = createKeyedStateBackend(registry, numKeyGroups, abstractBackend, dummyEnv);

    final TestRegistryListener registryListener = new TestRegistryListener();
    registry.registerListener(dummyEnv.getJobID(), registryListener);

    // Register state
    ValueStateDescriptor<byte[]> desc = new ValueStateDescriptor<>("any", BytePrimitiveArraySerializer.INSTANCE);
    desc.setQueryable("vanilla");

    ValueState<byte[]> state = backend.getPartitionedState(
        VoidNamespace.INSTANCE,
        VoidNamespaceSerializer.INSTANCE,
        desc);

    // Update KvState
    byte[] bytes = new byte[2 * channel.config().getWriteBufferHighWaterMark()];

    byte current = 0;
    for (int i = 0; i < bytes.length; i++) {
        bytes[i] = current++;
    }

    int key = 99812822;
    backend.setCurrentKey(key);
    state.update(bytes);

    // Request
    byte[] serializedKeyAndNamespace = KvStateSerializer.serializeKeyAndNamespace(
        key,
        IntSerializer.INSTANCE,
        VoidNamespace.INSTANCE,
        VoidNamespaceSerializer.INSTANCE);

    long requestId = Integer.MAX_VALUE + 182828L;

    assertTrue(registryListener.registrationName.equals("vanilla"));

    KvStateInternalRequest request = new KvStateInternalRequest(registryListener.kvStateId, serializedKeyAndNamespace);
    ByteBuf serRequest = MessageSerializer.serializeRequest(channel.alloc(), requestId, request);

    // Write the request and wait for the response
    channel.writeInbound(serRequest);

    Object msg = readInboundBlocking(channel);
    assertTrue("Not ChunkedByteBuf", msg instanceof ChunkedByteBuf);
}
Example #22
Source File: HashTableTest.java From Flink-CEPplus with Apache License 2.0
/**
 * Tests that the MutableHashTable spills its partitions when creating the initial table
 * without overflow segments in the partitions. This means that the records are large.
 */
@Test
public void testSpillingWhenBuildingTableWithoutOverflow() throws Exception {
    final IOManager ioMan = new IOManagerAsync();

    try {
        final TypeSerializer<byte[]> serializer = BytePrimitiveArraySerializer.INSTANCE;
        final TypeComparator<byte[]> buildComparator = new BytePrimitiveArrayComparator(true);
        final TypeComparator<byte[]> probeComparator = new BytePrimitiveArrayComparator(true);

        @SuppressWarnings("unchecked")
        final TypePairComparator<byte[], byte[]> pairComparator = new GenericPairComparator<>(
            new BytePrimitiveArrayComparator(true), new BytePrimitiveArrayComparator(true));

        final int pageSize = 128;
        final int numSegments = 33;

        List<MemorySegment> memory = getMemory(numSegments, pageSize);

        MutableHashTable<byte[], byte[]> table = new MutableHashTable<byte[], byte[]>(
            serializer,
            serializer,
            buildComparator,
            probeComparator,
            pairComparator,
            memory,
            ioMan,
            1,
            false);

        int numElements = 9;

        table.open(
            new CombiningIterator<byte[]>(
                new ByteArrayIterator(numElements, 128, (byte) 0),
                new ByteArrayIterator(numElements, 128, (byte) 1)),
            new CombiningIterator<byte[]>(
                new ByteArrayIterator(1, 128, (byte) 0),
                new ByteArrayIterator(1, 128, (byte) 1)));

        while (table.nextRecord()) {
            MutableObjectIterator<byte[]> iterator = table.getBuildSideIterator();

            int counter = 0;

            while (iterator.next() != null) {
                counter++;
            }

            // check that we retrieve all our elements
            Assert.assertEquals(numElements, counter);
        }

        table.close();
    } finally {
        ioMan.shutdown();
    }
}
Example #23
Source File: RocksFullSnapshotStrategy.java From Flink-CEPplus with Apache License 2.0
private void writeKeyValuePair(byte[] key, byte[] value, DataOutputView out) throws IOException {
    BytePrimitiveArraySerializer.INSTANCE.serialize(key, out);
    BytePrimitiveArraySerializer.INSTANCE.serialize(value, out);
}
Example #24
Source File: RocksDBFullRestoreOperation.java From Flink-CEPplus with Apache License 2.0
/**
 * Restore the KV-state / ColumnFamily data for all key-groups referenced by the current state handle.
 */
private void restoreKVStateData() throws IOException, RocksDBException {
    //for all key-groups in the current state handle...
    try (RocksDBWriteBatchWrapper writeBatchWrapper = new RocksDBWriteBatchWrapper(db)) {
        for (Tuple2<Integer, Long> keyGroupOffset : currentKeyGroupsStateHandle.getGroupRangeOffsets()) {
            int keyGroup = keyGroupOffset.f0;

            // Check that restored key groups all belong to the backend
            Preconditions.checkState(keyGroupRange.contains(keyGroup), "The key group must belong to the backend");

            long offset = keyGroupOffset.f1;
            //not empty key-group?
            if (0L != offset) {
                currentStateHandleInStream.seek(offset);
                try (InputStream compressedKgIn = keygroupStreamCompressionDecorator.decorateWithCompression(currentStateHandleInStream)) {
                    DataInputViewStreamWrapper compressedKgInputView = new DataInputViewStreamWrapper(compressedKgIn);
                    //TODO this could be aware of keyGroupPrefixBytes and write only one byte if possible
                    int kvStateId = compressedKgInputView.readShort();
                    ColumnFamilyHandle handle = currentStateHandleKVStateColumnFamilies.get(kvStateId);
                    //insert all k/v pairs into DB
                    boolean keyGroupHasMoreKeys = true;
                    while (keyGroupHasMoreKeys) {
                        byte[] key = BytePrimitiveArraySerializer.INSTANCE.deserialize(compressedKgInputView);
                        byte[] value = BytePrimitiveArraySerializer.INSTANCE.deserialize(compressedKgInputView);
                        if (hasMetaDataFollowsFlag(key)) {
                            //clear the signal bit in the key to make it ready for insertion again
                            clearMetaDataFollowsFlag(key);
                            writeBatchWrapper.put(handle, key, value);
                            //TODO this could be aware of keyGroupPrefixBytes and write only one byte if possible
                            kvStateId = END_OF_KEY_GROUP_MARK & compressedKgInputView.readShort();
                            if (END_OF_KEY_GROUP_MARK == kvStateId) {
                                keyGroupHasMoreKeys = false;
                            } else {
                                handle = currentStateHandleKVStateColumnFamilies.get(kvStateId);
                            }
                        } else {
                            writeBatchWrapper.put(handle, key, value);
                        }
                    }
                }
            }
        }
    }
}