org.apache.flink.shaded.netty4.io.netty.buffer.ByteBuf Java Examples
The following examples show how to use
org.apache.flink.shaded.netty4.io.netty.buffer.ByteBuf.
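Before the project examples, here is a minimal, self-contained sketch of the basic ByteBuf write/read lifecycle. It is illustrative only; the class name, buffer size, and values are assumptions and not taken from any project below.

import org.apache.flink.shaded.netty4.io.netty.buffer.ByteBuf;
import org.apache.flink.shaded.netty4.io.netty.buffer.Unpooled;

public class ByteBufBasics {

    public static void main(String[] args) {
        // Allocate a heap buffer with an initial capacity of 16 bytes (it grows on demand).
        ByteBuf buf = Unpooled.buffer(16);
        try {
            // Writes advance the writer index ...
            buf.writeInt(42);
            buf.writeLong(1337L);

            // ... reads advance the reader index independently.
            int intValue = buf.readInt();     // 42
            long longValue = buf.readLong();  // 1337L

            System.out.println(intValue + " / " + longValue);
            System.out.println("readable bytes left: " + buf.readableBytes()); // 0
        } finally {
            // ByteBuf is reference counted; release it when done.
            buf.release();
        }
    }
}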
Example #1
Source File: MessageSerializer.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
/**
 * Serializes the failure message sent to the
 * {@link org.apache.flink.queryablestate.network.Client} in case of
 * server related errors.
 *
 * @param alloc The {@link ByteBufAllocator} used to allocate the buffer to serialize the message into.
 * @param cause The exception thrown at the server.
 * @return The failure message.
 */
public static ByteBuf serializeServerFailure(
        final ByteBufAllocator alloc,
        final Throwable cause) throws IOException {

    final ByteBuf buf = alloc.ioBuffer();

    // Frame length is set at end
    buf.writeInt(0);

    writeHeader(buf, MessageType.SERVER_FAILURE);

    try (ByteBufOutputStream bbos = new ByteBufOutputStream(buf);
            ObjectOutput out = new ObjectOutputStream(bbos)) {
        out.writeObject(cause);
    }

    // Set frame length
    int frameLength = buf.readableBytes() - Integer.BYTES;
    buf.setInt(0, frameLength);
    return buf;
}
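Example #1 writes a zero placeholder for the frame length and backfills it via setInt(0, ...) once the payload size is known. As a rough illustration of how such a length-prefixed frame might be consumed (a hypothetical reader, not Flink's actual decoder):

// Hypothetical reader for the length-prefixed frame produced above.
// 'buf' is assumed to contain one complete frame starting at its reader index.
static ByteBuf readFrame(ByteBuf buf) {
    int frameLength = buf.readInt();    // the value backfilled via setInt(0, ...)
    return buf.readSlice(frameLength);  // header + serialized Throwable, no copy
}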
Example #2
Source File: ReadOnlySlicedBufferTest.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
private static void assertReadableBytes(Buffer actualBuffer, int... expectedBytes) {
    ByteBuffer actualBytesBuffer = actualBuffer.getNioBufferReadable();
    int[] actual = new int[actualBytesBuffer.limit()];
    for (int i = 0; i < actual.length; ++i) {
        actual[i] = actualBytesBuffer.get();
    }
    assertArrayEquals(expectedBytes, actual);

    // verify absolutely positioned read method:
    ByteBuf buffer = (ByteBuf) actualBuffer;
    for (int i = 0; i < buffer.readableBytes(); ++i) {
        actual[i] = buffer.getByte(buffer.readerIndex() + i);
    }
    assertArrayEquals(expectedBytes, actual);

    // verify relatively positioned read method:
    for (int i = 0; i < buffer.readableBytes(); ++i) {
        actual[i] = buffer.readByte();
    }
    assertArrayEquals(expectedBytes, actual);
}
Example #3
Source File: AbstractByteBufTest.java From flink with Apache License 2.0 | 6 votes |
@Test
public void testRandomByteArrayTransfer1() {
    byte[] value = new byte[BLOCK_SIZE];
    for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) {
        random.nextBytes(value);
        buffer.setBytes(i, value);
    }

    random.setSeed(seed);
    byte[] expectedValueContent = new byte[BLOCK_SIZE];
    ByteBuf expectedValue = wrappedBuffer(expectedValueContent);
    for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) {
        random.nextBytes(expectedValueContent);
        buffer.getBytes(i, value);
        for (int j = 0; j < BLOCK_SIZE; j++) {
            assertEquals(expectedValue.getByte(j), value[j]);
        }
    }
}
Example #4
Source File: AbstractByteBufTest.java From flink with Apache License 2.0 | 6 votes |
private void testInternalNioBuffer(int a) {
    ByteBuf buffer = newBuffer(2);
    ByteBuffer buf = buffer.internalNioBuffer(buffer.readerIndex(), 1);
    assertEquals(1, buf.remaining());

    byte[] data = new byte[a];
    PlatformDependent.threadLocalRandom().nextBytes(data);
    buffer.writeBytes(data);

    buf = buffer.internalNioBuffer(buffer.readerIndex(), a);
    assertEquals(a, buf.remaining());

    for (int i = 0; i < a; i++) {
        assertEquals(data[i], buf.get());
    }
    assertFalse(buf.hasRemaining());
    buffer.release();
}
Example #5
Source File: MessageSerializer.java From flink with Apache License 2.0 | 6 votes |
/**
 * Serializes the exception containing the failure message sent to the
 * {@link org.apache.flink.queryablestate.network.Client} in case of
 * protocol related errors.
 *
 * @param alloc The {@link ByteBufAllocator} used to allocate the buffer to serialize the message into.
 * @param requestId The id of the request to which the message refers.
 * @param cause The exception thrown at the server.
 * @return A {@link ByteBuf} containing the serialized message.
 */
public static ByteBuf serializeRequestFailure(
        final ByteBufAllocator alloc,
        final long requestId,
        final Throwable cause) throws IOException {

    final ByteBuf buf = alloc.ioBuffer();

    // Frame length is set at the end
    buf.writeInt(0);

    writeHeader(buf, MessageType.REQUEST_FAILURE);
    buf.writeLong(requestId);

    try (ByteBufOutputStream bbos = new ByteBufOutputStream(buf);
            ObjectOutput out = new ObjectOutputStream(bbos)) {
        out.writeObject(cause);
    }

    // Set frame length
    int frameLength = buf.readableBytes() - Integer.BYTES;
    buf.setInt(0, frameLength);
    return buf;
}
Example #6
Source File: NettyMessage.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
@Override
protected ByteBuf extractFrame(ChannelHandlerContext ctx, ByteBuf buffer, int index, int length) {
    if (restoreOldNettyBehaviour) {
        /*
         * For non-credit based code paths with Netty >= 4.0.28.Final:
         * These versions contain an improvement by Netty, which slices a Netty buffer
         * instead of doing a memory copy [1] in the
         * LengthFieldBasedFrameDecoder. In some situations, this
         * interacts badly with our Netty pipeline leading to OutOfMemory
         * errors.
         *
         * [1] https://github.com/netty/netty/issues/3704
         *
         * TODO: remove along with the non-credit based fallback protocol
         */
        ByteBuf frame = ctx.alloc().buffer(length);
        frame.writeBytes(buffer, index, length);
        return frame;
    } else {
        return super.extractFrame(ctx, buffer, index, length);
    }
}
Example #7
Source File: AbstractByteBufTest.java From flink with Apache License 2.0 | 6 votes |
@Test
public void testSequentialCopiedBufferTransfer1() {
    buffer.writerIndex(0);
    for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) {
        byte[] value = new byte[BLOCK_SIZE];
        random.nextBytes(value);
        assertEquals(0, buffer.readerIndex());
        assertEquals(i, buffer.writerIndex());
        buffer.writeBytes(value);
    }

    random.setSeed(seed);
    byte[] expectedValue = new byte[BLOCK_SIZE];
    for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) {
        random.nextBytes(expectedValue);
        assertEquals(i, buffer.readerIndex());
        assertEquals(CAPACITY, buffer.writerIndex());
        ByteBuf actualValue = buffer.readBytes(BLOCK_SIZE);
        assertEquals(wrappedBuffer(expectedValue), actualValue);

        // Make sure if it is a copied buffer.
        actualValue.setByte(0, (byte) (actualValue.getByte(0) + 1));
        assertFalse(buffer.getByte(i) == actualValue.getByte(0));
        actualValue.release();
    }
}
Example #8
Source File: NettyMessage.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
static TaskEventRequest readFrom(ByteBuf buffer, ClassLoader classLoader) throws IOException {
    // directly deserialize from Netty's buffer
    int length = buffer.readInt();
    ByteBuffer serializedEvent = buffer.nioBuffer(buffer.readerIndex(), length);
    // assume this event's content is read from the ByteBuf (positions are not shared!)
    buffer.readerIndex(buffer.readerIndex() + length);

    TaskEvent event =
        (TaskEvent) EventSerializer.fromSerializedEvent(serializedEvent, classLoader);

    ResultPartitionID partitionId =
        new ResultPartitionID(
            IntermediateResultPartitionID.fromByteBuf(buffer),
            ExecutionAttemptID.fromByteBuf(buffer));

    InputChannelID receiverId = InputChannelID.fromByteBuf(buffer);

    return new TaskEventRequest(event, partitionId, receiverId);
}
Example #9
Source File: NettyMessage.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
@Override
ByteBuf write(ByteBufAllocator allocator) throws Exception {
    ByteBuf result = null;

    try {
        result = allocateBuffer(allocator, ID, 16);
        receiverId.writeTo(result);
    } catch (Throwable t) {
        if (result != null) {
            result.release();
        }
        throw new IOException(t);
    }

    return result;
}
Example #10
Source File: NettyMessage.java From flink with Apache License 2.0 | 6 votes |
/**
 * Allocates a new buffer and adds some header information for the frame decoder.
 *
 * <p>If the <tt>contentLength</tt> is unknown, you must write the actual length after adding
 * the contents as an integer to position <tt>0</tt>!
 *
 * @param allocator
 *        byte buffer allocator to use
 * @param id
 *        {@link NettyMessage} subclass ID
 * @param messageHeaderLength
 *        additional header length that should be part of the allocated buffer and is written
 *        outside of this method
 * @param contentLength
 *        content length (or <tt>-1</tt> if unknown)
 * @param allocateForContent
 *        whether to make room for the actual content in the buffer (<tt>true</tt>) or whether to
 *        only return a buffer with the header information (<tt>false</tt>)
 *
 * @return a newly allocated direct buffer with header data written for {@link NettyMessageDecoder}
 */
private static ByteBuf allocateBuffer(
        ByteBufAllocator allocator,
        byte id,
        int messageHeaderLength,
        int contentLength,
        boolean allocateForContent) {
    checkArgument(contentLength <= Integer.MAX_VALUE - FRAME_HEADER_LENGTH);

    final ByteBuf buffer;
    if (!allocateForContent) {
        buffer = allocator.directBuffer(FRAME_HEADER_LENGTH + messageHeaderLength);
    } else if (contentLength != -1) {
        buffer = allocator.directBuffer(FRAME_HEADER_LENGTH + messageHeaderLength + contentLength);
    } else {
        // content length unknown -> start with the default initial size (rather than FRAME_HEADER_LENGTH only):
        buffer = allocator.directBuffer();
    }
    buffer.writeInt(FRAME_HEADER_LENGTH + messageHeaderLength + contentLength); // may be updated later, e.g. if contentLength == -1
    buffer.writeInt(MAGIC_NUMBER);
    buffer.writeByte(id);

    return buffer;
}
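The Javadoc above describes the frame header layout written by this method: frame length (int), magic number (int), message ID (byte). As a hedged sketch of the reading side (hypothetical; MAGIC_NUMBER is the constant referenced in the example, and the error handling shown here is an assumption, not Flink's actual decoder):

// Hypothetical counterpart: reading back the header written by allocateBuffer().
// Assumed layout: frame length (int) | magic number (int) | message ID (byte).
static byte readFrameHeader(ByteBuf buffer) {
    int frameLength = buffer.readInt(); // total frame size, including this header
    int magic = buffer.readInt();
    if (magic != MAGIC_NUMBER) {
        throw new IllegalStateException("Unexpected magic number: " + magic);
    }
    return buffer.readByte(); // NettyMessage subclass ID
}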
Example #11
Source File: AbstractByteBufTest.java From flink with Apache License 2.0 | 6 votes |
@Test
public void comparableInterfaceNotViolated() {
    assumeFalse(buffer.isReadOnly());
    buffer.writerIndex(buffer.readerIndex());
    assumeTrue(buffer.writableBytes() >= 4);

    buffer.writeLong(0);
    ByteBuf buffer2 = newBuffer(CAPACITY);
    assumeFalse(buffer2.isReadOnly());
    buffer2.writerIndex(buffer2.readerIndex());
    // Write an unsigned integer that will cause buffer.getUnsignedInt() - buffer2.getUnsignedInt() to underflow the
    // int type and wrap around on the negative side.
    buffer2.writeLong(0xF0000000L);
    assertTrue(buffer.compareTo(buffer2) < 0);
    assertTrue(buffer2.compareTo(buffer) > 0);
    buffer2.release();
}
Example #12
Source File: NetworkBuffer.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
@Override
public ByteBuf getBytes(int index, ByteBuf dst, int dstIndex, int length) {
    // from UnpooledDirectByteBuf:

    checkDstIndex(index, length, dstIndex, dst.capacity());

    if (dst.hasArray()) {
        getBytes(index, dst.array(), dst.arrayOffset() + dstIndex, length);
    } else if (dst.nioBufferCount() > 0) {
        for (ByteBuffer bb : dst.nioBuffers(dstIndex, length)) {
            int bbLen = bb.remaining();
            getBytes(index, bb);
            index += bbLen;
        }
    } else {
        dst.setBytes(dstIndex, this, index, length);
    }
    return this;
}
Example #13
Source File: ByteBufUtilsTest.java From flink with Apache License 2.0 | 6 votes |
@Test
public void testAccumulateWithoutCopy() {
    int sourceLength = 128;
    int sourceReaderIndex = 32;
    int expectedAccumulationSize = 16;

    ByteBuf src = createSourceBuffer(sourceLength, sourceReaderIndex, expectedAccumulationSize);
    ByteBuf target = Unpooled.buffer(expectedAccumulationSize);

    // If src has enough data and no data has been copied yet, src will be returned without modification.
    ByteBuf accumulated = ByteBufUtils.accumulate(target, src, expectedAccumulationSize, target.readableBytes());

    assertSame(src, accumulated);
    assertEquals(sourceReaderIndex, src.readerIndex());
    verifyBufferContent(src, sourceReaderIndex, expectedAccumulationSize);
}
Example #14
Source File: NettyMessageClientDecoderDelegateTest.java From flink with Apache License 2.0 | 6 votes |
private List<ByteBuf> partitionBuffer(ByteBuf buffer, int partitionSize) {
    List<ByteBuf> result = new ArrayList<>();

    try {
        int bufferSize = buffer.readableBytes();
        for (int position = 0; position < bufferSize; position += partitionSize) {
            int endPosition = Math.min(position + partitionSize, bufferSize);
            ByteBuf partitionedBuffer = ALLOCATOR.buffer(endPosition - position);
            partitionedBuffer.writeBytes(buffer, position, endPosition - position);
            result.add(partitionedBuffer);
        }
    } catch (Throwable t) {
        releaseBuffers(result.toArray(new ByteBuf[0]));
        ExceptionUtils.rethrow(t);
    }

    return result;
}
Example #15
Source File: AbstractByteBufTest.java From flink with Apache License 2.0 | 6 votes |
@Test
@SuppressWarnings("ForLoopThatDoesntUseLoopVariable")
public void testNioBufferExposeOnlyRegion() {
    final ByteBuf buffer = newBuffer(8);
    byte[] data = new byte[8];
    random.nextBytes(data);
    buffer.writeBytes(data);

    ByteBuffer nioBuf = buffer.nioBuffer(1, data.length - 2);
    assertEquals(0, nioBuf.position());
    assertEquals(6, nioBuf.remaining());

    for (int i = 1; nioBuf.hasRemaining(); i++) {
        assertEquals(data[i], nioBuf.get());
    }
    buffer.release();
}
Example #16
Source File: AbstractByteBufTest.java From flink with Apache License 2.0 | 6 votes |
@Test
public void testCompareTo2() {
    byte[] bytes = {1, 2, 3, 4};
    byte[] bytesReversed = {4, 3, 2, 1};

    ByteBuf buf1 = newBuffer(4).clear().writeBytes(bytes).order(ByteOrder.LITTLE_ENDIAN);
    ByteBuf buf2 = newBuffer(4).clear().writeBytes(bytesReversed).order(ByteOrder.LITTLE_ENDIAN);
    ByteBuf buf3 = newBuffer(4).clear().writeBytes(bytes).order(ByteOrder.BIG_ENDIAN);
    ByteBuf buf4 = newBuffer(4).clear().writeBytes(bytesReversed).order(ByteOrder.BIG_ENDIAN);
    try {
        assertEquals(buf1.compareTo(buf2), buf3.compareTo(buf4));
        assertEquals(buf2.compareTo(buf1), buf4.compareTo(buf3));
        assertEquals(buf1.compareTo(buf3), buf2.compareTo(buf4));
        assertEquals(buf3.compareTo(buf1), buf4.compareTo(buf2));
    } finally {
        buf1.release();
        buf2.release();
        buf3.release();
        buf4.release();
    }
}
Example #17
Source File: AbstractByteBufTest.java From flink with Apache License 2.0 | 6 votes |
@Test
public void testRandomHeapBufferTransfer1() {
    byte[] valueContent = new byte[BLOCK_SIZE];
    ByteBuf value = wrappedBuffer(valueContent);
    for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) {
        random.nextBytes(valueContent);
        value.setIndex(0, BLOCK_SIZE);
        buffer.setBytes(i, value);
        assertEquals(BLOCK_SIZE, value.readerIndex());
        assertEquals(BLOCK_SIZE, value.writerIndex());
    }

    random.setSeed(seed);
    byte[] expectedValueContent = new byte[BLOCK_SIZE];
    ByteBuf expectedValue = wrappedBuffer(expectedValueContent);
    for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) {
        random.nextBytes(expectedValueContent);
        value.clear();
        buffer.getBytes(i, value);
        assertEquals(0, value.readerIndex());
        assertEquals(BLOCK_SIZE, value.writerIndex());
        for (int j = 0; j < BLOCK_SIZE; j++) {
            assertEquals(expectedValue.getByte(j), value.getByte(j));
        }
    }
}
Example #18
Source File: PartitionRequestClientHandlerTest.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
/**
 * Returns a deserialized buffer message as it would be received during runtime.
 */
static BufferResponse createBufferResponse(
        Buffer buffer,
        int sequenceNumber,
        InputChannelID receivingChannelId,
        int backlog) throws IOException {

    // Mock buffer to serialize
    BufferResponse resp = new BufferResponse(buffer, sequenceNumber, receivingChannelId, backlog);

    ByteBuf serialized = resp.write(UnpooledByteBufAllocator.DEFAULT);

    // Skip general header bytes
    serialized.readBytes(NettyMessage.FRAME_HEADER_LENGTH);

    // Deserialize the bytes again. We have to go this way, because we only partly deserialize
    // the header of the response and wait for a buffer from the buffer pool to copy the payload
    // data into.
    BufferResponse deserialized = BufferResponse.readFrom(serialized);

    return deserialized;
}
Example #19
Source File: MessageSerializerTest.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
/**
 * Tests request serialization.
 */
@Test
public void testRequestSerialization() throws Exception {
    long requestId = Integer.MAX_VALUE + 1337L;
    KvStateID kvStateId = new KvStateID();
    byte[] serializedKeyAndNamespace = randomByteArray(1024);

    final KvStateInternalRequest request = new KvStateInternalRequest(kvStateId, serializedKeyAndNamespace);

    final MessageSerializer<KvStateInternalRequest, KvStateResponse> serializer =
            new MessageSerializer<>(new KvStateInternalRequest.KvStateInternalRequestDeserializer(), new KvStateResponse.KvStateResponseDeserializer());

    ByteBuf buf = MessageSerializer.serializeRequest(alloc, requestId, request);

    int frameLength = buf.readInt();
    assertEquals(MessageType.REQUEST, MessageSerializer.deserializeHeader(buf));
    assertEquals(requestId, MessageSerializer.getRequestId(buf));
    KvStateInternalRequest requestDeser = serializer.deserializeRequest(buf);

    assertEquals(buf.readerIndex(), frameLength + 4);
    assertEquals(kvStateId, requestDeser.getKvStateId());
    assertArrayEquals(serializedKeyAndNamespace, requestDeser.getSerializedKeyAndNamespace());
}
Example #20
Source File: AbstractByteBufTest.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
@Test
public void testRandomByteArrayTransfer2() {
    byte[] value = new byte[BLOCK_SIZE * 2];
    for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) {
        random.nextBytes(value);
        buffer.setBytes(i, value, random.nextInt(BLOCK_SIZE), BLOCK_SIZE);
    }

    random.setSeed(seed);
    byte[] expectedValueContent = new byte[BLOCK_SIZE * 2];
    ByteBuf expectedValue = wrappedBuffer(expectedValueContent);
    for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) {
        random.nextBytes(expectedValueContent);
        int valueOffset = random.nextInt(BLOCK_SIZE);
        buffer.getBytes(i, value, valueOffset, BLOCK_SIZE);
        for (int j = valueOffset; j < valueOffset + BLOCK_SIZE; j++) {
            assertEquals(expectedValue.getByte(j), value[j]);
        }
    }
}
Example #21
Source File: MessageSerializerTest.java From flink with Apache License 2.0 | 6 votes |
/**
 * Tests request serialization with zero-length serialized key and namespace.
 */
@Test
public void testRequestSerializationWithZeroLengthKeyAndNamespace() throws Exception {
    long requestId = Integer.MAX_VALUE + 1337L;
    KvStateID kvStateId = new KvStateID();
    byte[] serializedKeyAndNamespace = new byte[0];

    final KvStateInternalRequest request = new KvStateInternalRequest(kvStateId, serializedKeyAndNamespace);

    final MessageSerializer<KvStateInternalRequest, KvStateResponse> serializer =
            new MessageSerializer<>(new KvStateInternalRequest.KvStateInternalRequestDeserializer(), new KvStateResponse.KvStateResponseDeserializer());

    ByteBuf buf = MessageSerializer.serializeRequest(alloc, requestId, request);

    int frameLength = buf.readInt();
    assertEquals(MessageType.REQUEST, MessageSerializer.deserializeHeader(buf));
    assertEquals(requestId, MessageSerializer.getRequestId(buf));
    KvStateInternalRequest requestDeser = serializer.deserializeRequest(buf);

    assertEquals(buf.readerIndex(), frameLength + 4);
    assertEquals(kvStateId, requestDeser.getKvStateId());
    assertArrayEquals(serializedKeyAndNamespace, requestDeser.getSerializedKeyAndNamespace());
}
Example #22
Source File: AbstractByteBufTest.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
private void testSliceCapacityChange(boolean retainedSlice) {
    ByteBuf buf = newBuffer(8);
    ByteBuf slice = retainedSlice
            ? buf.retainedSlice(buf.readerIndex() + 1, 3)
            : buf.slice(buf.readerIndex() + 1, 3);
    try {
        slice.capacity(10);
    } finally {
        if (retainedSlice) {
            slice.release();
        }
        buf.release();
    }
}
Example #23
Source File: AbstractByteBufTest.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@Test
public void testSliceAfterReleaseRetainedSliceRetainedDuplicate() {
    ByteBuf buf = newBuffer(1);
    ByteBuf buf2 = buf.retainedSlice(0, 1);
    ByteBuf buf3 = buf2.retainedDuplicate();
    assertSliceFailAfterRelease(buf, buf2, buf3);
}
Example #24
Source File: AbstractByteBufTest.java From flink with Apache License 2.0 | 5 votes |
@Test(expected = IllegalReferenceCountException.class)
public void testReadBytesAfterRelease4() {
    ByteBuf buffer = buffer(8);
    try {
        releasedBuffer().readBytes(buffer, 0, 1);
    } finally {
        buffer.release();
    }
}
Example #25
Source File: AbstractByteBufTest.java From flink with Apache License 2.0 | 5 votes |
@Test(expected = IndexOutOfBoundsException.class)
public void testGetBytesByteBuffer() {
    byte[] bytes = {'a', 'b', 'c', 'd', 'e', 'f', 'g'};
    // Ensure destination buffer is bigger than what is in the ByteBuf.
    ByteBuffer nioBuffer = ByteBuffer.allocate(bytes.length + 1);
    ByteBuf buffer = newBuffer(bytes.length);
    try {
        buffer.writeBytes(bytes);
        buffer.getBytes(buffer.readerIndex(), nioBuffer);
    } finally {
        buffer.release();
    }
}
Example #26
Source File: AbstractByteBufTest.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@Test
public void testHashCode() {
    ByteBuf elemA = buffer(15);
    ByteBuf elemB = directBuffer(15);
    elemA.writeBytes(new byte[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5 });
    elemB.writeBytes(new byte[] { 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9 });

    Set<ByteBuf> set = new HashSet<ByteBuf>();
    set.add(elemA);
    set.add(elemB);

    assertEquals(2, set.size());
    ByteBuf elemACopy = elemA.copy();
    assertTrue(set.contains(elemACopy));

    ByteBuf elemBCopy = elemB.copy();
    assertTrue(set.contains(elemBCopy));

    buffer.clear();
    buffer.writeBytes(elemA.duplicate());

    assertTrue(set.remove(buffer));
    assertFalse(set.contains(elemA));
    assertEquals(1, set.size());

    buffer.clear();
    buffer.writeBytes(elemB.duplicate());
    assertTrue(set.remove(buffer));
    assertFalse(set.contains(elemB));
    assertEquals(0, set.size());

    elemA.release();
    elemB.release();
    elemACopy.release();
    elemBCopy.release();
}
Example #27
Source File: AbstractByteBufTest.java From flink with Apache License 2.0 | 5 votes |
@Test
public void testArrayAfterRelease() {
    ByteBuf buf = releasedBuffer();
    if (buf.hasArray()) {
        try {
            buf.array();
            fail();
        } catch (IllegalReferenceCountException e) {
            // expected
        }
    }
}
Example #28
Source File: AbstractByteBufTest.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@Test(expected = IllegalReferenceCountException.class)
public void testReadBytesAfterRelease4() {
    ByteBuf buffer = buffer(8);
    try {
        releasedBuffer().readBytes(buffer, 0, 1);
    } finally {
        buffer.release();
    }
}
Example #29
Source File: AbstractByteBufTest.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@Test(expected = IndexOutOfBoundsException.class)
public void readByteThrowsIndexOutOfBoundsException() {
    final ByteBuf buffer = newBuffer(8);
    try {
        buffer.writeByte(0);
        assertEquals((byte) 0, buffer.readByte());
        buffer.readByte();
    } finally {
        buffer.release();
    }
}
Example #30
Source File: NettyMessageSerializationTest.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@SuppressWarnings("unchecked")
private <T extends NettyMessage> T encodeAndDecode(T msg) {
    channel.writeOutbound(msg);
    ByteBuf encoded = (ByteBuf) channel.readOutbound();

    assertTrue(channel.writeInbound(encoded));

    return (T) channel.readInbound();
}