io.netty.channel.DefaultMessageSizeEstimator Java Examples

The following examples show how to use io.netty.channel.DefaultMessageSizeEstimator. Each one is taken from an open-source project; the source file and license are noted above the code.
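Before the project code, here is a minimal standalone sketch (not taken from any of the projects below) of what the estimator actually reports. Netty uses the configured MessageSizeEstimator to account pending writes against the channel's write-buffer water marks; DefaultMessageSizeEstimator.DEFAULT sizes a ByteBuf by its readable bytes and falls back to a fixed value (8 bytes) for message types it does not recognize. The class and constant names are Netty's; everything else is illustrative.

import io.netty.buffer.Unpooled;
import io.netty.channel.DefaultMessageSizeEstimator;
import io.netty.channel.MessageSizeEstimator;

public class MessageSizeEstimatorSketch {
    public static void main(String[] args) {
        // Shared default instance, the same one passed to
        // ChannelOption.MESSAGE_SIZE_ESTIMATOR in the examples below.
        MessageSizeEstimator.Handle handle = DefaultMessageSizeEstimator.DEFAULT.newHandle();

        System.out.println(handle.size(Unpooled.wrappedBuffer(new byte[42]))); // 42 (readable bytes)
        System.out.println(handle.size(new Object()));                         // 8 (fallback size)

        // An estimator that assumes 64 bytes for message types it cannot measure.
        MessageSizeEstimator custom = new DefaultMessageSizeEstimator(64);
        System.out.println(custom.newHandle().size(new Object()));             // 64
    }
}

In the bootstraps below the estimator is installed via b.option(ChannelOption.MESSAGE_SIZE_ESTIMATOR, DefaultMessageSizeEstimator.DEFAULT), which is also the value Netty uses when the option is left unset.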
Example #1
Source File: PeerClient.java    From gsc-core with GNU Lesser General Public License v3.0
private ChannelFuture connectAsync(String host, int port, String remoteId,
                                   boolean discoveryMode) {

    logger.info("connect peer {} {} {}", host, port, remoteId);

    GSCChannelInitializer GSCChannelInitializer = ctx
            .getBean(GSCChannelInitializer.class, remoteId);
    GSCChannelInitializer.setPeerDiscoveryMode(discoveryMode);

    Bootstrap b = new Bootstrap();
    b.group(workerGroup);
    b.channel(NioSocketChannel.class);

    b.option(ChannelOption.SO_KEEPALIVE, true);
    // Account outbound messages with Netty's default size estimator when tracking
    // the channel's write-buffer water marks.
    b.option(ChannelOption.MESSAGE_SIZE_ESTIMATOR, DefaultMessageSizeEstimator.DEFAULT);
    b.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, Args.getInstance().getNodeConnectionTimeout());
    b.remoteAddress(host, port);

    b.handler(GSCChannelInitializer);

    // Connect asynchronously; the caller decides whether to wait on the returned future.
    return b.connect();
}
 
Example #2
Source File: BaseNet.java    From gsc-core with GNU Lesser General Public License v3.0
public static Channel connect(ByteToMessageDecoder decoder) throws InterruptedException {
  NioEventLoopGroup group = new NioEventLoopGroup(1);
  Bootstrap b = new Bootstrap();
  b.group(group).channel(NioSocketChannel.class)
      .handler(new ChannelInitializer<Channel>() {
        @Override
        protected void initChannel(Channel ch) throws Exception {
          ch.config().setRecvByteBufAllocator(new FixedRecvByteBufAllocator(256 * 1024));
          ch.config().setOption(ChannelOption.SO_RCVBUF, 256 * 1024);
          ch.config().setOption(ChannelOption.SO_BACKLOG, 1024);
          ch.pipeline()
              .addLast("readTimeoutHandler", new ReadTimeoutHandler(600, TimeUnit.SECONDS))
              .addLast("writeTimeoutHandler", new WriteTimeoutHandler(600, TimeUnit.SECONDS));
          ch.pipeline().addLast("protoPender", new ProtobufVarint32LengthFieldPrepender());
          ch.pipeline().addLast("lengthDecode", new ProtobufVarint32FrameDecoder());
          ch.pipeline().addLast("handshakeHandler", decoder);
          ch.closeFuture();
        }
      }).option(ChannelOption.SO_KEEPALIVE, true)
      .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 60000)
      .option(ChannelOption.MESSAGE_SIZE_ESTIMATOR, DefaultMessageSizeEstimator.DEFAULT);
  return b.connect("127.0.0.1", port).sync().channel();
}
 
Example #3
Source File: PeerServer.java    From gsc-core with GNU Lesser General Public License v3.0
public void start(int port) {

        EventLoopGroup bossGroup = new NioEventLoopGroup(1);
        EventLoopGroup workerGroup = new NioEventLoopGroup(args.getTcpNettyWorkThreadNum());
        GSCChannelInitializer GSCChannelInitializer = ctx.getBean(GSCChannelInitializer.class, "");

        try {
            ServerBootstrap b = new ServerBootstrap();

            b.group(bossGroup, workerGroup);
            b.channel(NioServerSocketChannel.class);

            b.option(ChannelOption.SO_KEEPALIVE, true);
            b.option(ChannelOption.MESSAGE_SIZE_ESTIMATOR, DefaultMessageSizeEstimator.DEFAULT);
            b.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, this.args.getNodeConnectionTimeout());

            b.handler(new LoggingHandler());
            b.childHandler(GSCChannelInitializer);

            // Start the server.
            logger.info("TCP listener started, bind port {}", port);

            channelFuture = b.bind(port).sync();

            listening = true;

            // Wait until the connection is closed.
            channelFuture.channel().closeFuture().sync();

            logger.info("TCP listener is closed");

        } catch (Exception e) {
            logger.error("Start TCP server failed.", e);
        } finally {
            workerGroup.shutdownGracefully();
            bossGroup.shutdownGracefully();
            listening = false;
        }
    }
 
Example #4
Source File: StreamBufferingEncoderTest.java    From netty-4.1.22 with Apache License 2.0
/**
 * Init fields and do mocking.
 */
@Before
public void setup() throws Exception {
    MockitoAnnotations.initMocks(this);

    Http2FrameWriter.Configuration configuration = mock(Http2FrameWriter.Configuration.class);
    Http2FrameSizePolicy frameSizePolicy = mock(Http2FrameSizePolicy.class);
    when(writer.configuration()).thenReturn(configuration);
    when(configuration.frameSizePolicy()).thenReturn(frameSizePolicy);
    when(frameSizePolicy.maxFrameSize()).thenReturn(DEFAULT_MAX_FRAME_SIZE);
    when(writer.writeData(any(ChannelHandlerContext.class), anyInt(), any(ByteBuf.class), anyInt(), anyBoolean(),
            any(ChannelPromise.class))).thenAnswer(successAnswer());
    when(writer.writeRstStream(eq(ctx), anyInt(), anyLong(), any(ChannelPromise.class))).thenAnswer(
            successAnswer());
    when(writer.writeGoAway(any(ChannelHandlerContext.class), anyInt(), anyLong(), any(ByteBuf.class),
            any(ChannelPromise.class)))
            .thenAnswer(successAnswer());

    connection = new DefaultHttp2Connection(false);
    connection.remote().flowController(new DefaultHttp2RemoteFlowController(connection));
    connection.local().flowController(new DefaultHttp2LocalFlowController(connection).frameWriter(writer));

    DefaultHttp2ConnectionEncoder defaultEncoder =
            new DefaultHttp2ConnectionEncoder(connection, writer);
    encoder = new StreamBufferingEncoder(defaultEncoder);
    DefaultHttp2ConnectionDecoder decoder =
            new DefaultHttp2ConnectionDecoder(connection, encoder, mock(Http2FrameReader.class));
    Http2ConnectionHandler handler = new Http2ConnectionHandlerBuilder()
            .frameListener(mock(Http2FrameListener.class))
            .codec(decoder, encoder).build();

    // Set LifeCycleManager on encoder and decoder
    when(ctx.channel()).thenReturn(channel);
    when(ctx.alloc()).thenReturn(UnpooledByteBufAllocator.DEFAULT);
    when(channel.alloc()).thenReturn(UnpooledByteBufAllocator.DEFAULT);
    when(executor.inEventLoop()).thenReturn(true);
    doAnswer(new Answer<ChannelPromise>() {
        @Override
        public ChannelPromise answer(InvocationOnMock invocation) throws Throwable {
            return newPromise();
        }
    }).when(ctx).newPromise();
    when(ctx.executor()).thenReturn(executor);
    when(channel.isActive()).thenReturn(false);
    when(channel.config()).thenReturn(config);
    when(channel.isWritable()).thenReturn(true);
    when(channel.bytesBeforeUnwritable()).thenReturn(Long.MAX_VALUE);
    when(config.getWriteBufferHighWaterMark()).thenReturn(Integer.MAX_VALUE);
    when(config.getMessageSizeEstimator()).thenReturn(DefaultMessageSizeEstimator.DEFAULT);
    ChannelMetadata metadata = new ChannelMetadata(false, 16);
    when(channel.metadata()).thenReturn(metadata);
    when(channel.unsafe()).thenReturn(unsafe);
    handler.handlerAdded(ctx);
}
 
Example #5
Source File: PeerServer.java    From ethereumj with MIT License
public void start(int port) {


        inactivesCollector.scheduleAtFixedRate(new TimerTask() {
            public void run() {

                Iterator<Channel> iter = channels.iterator();
                while(iter.hasNext()){
                    Channel channel = iter.next();
                    if(!channel.p2pHandler.isActive()){

                        iter.remove();
                        logger.info("Channel removed: {}", channel.p2pHandler.getHandshakeHelloMessage());
                    }
                }
            }
        }, 2000, 5000);


        EventLoopGroup bossGroup = new NioEventLoopGroup(1);
        EventLoopGroup workerGroup = new NioEventLoopGroup();

        if (peerListener != null) {
            peerListener.console("Listening on port " + port);
        }


        try {
            ServerBootstrap b = new ServerBootstrap();

            b.group(bossGroup, workerGroup);
            b.channel(NioServerSocketChannel.class);
            
            b.option(ChannelOption.SO_KEEPALIVE, true);
            b.option(ChannelOption.MESSAGE_SIZE_ESTIMATOR, DefaultMessageSizeEstimator.DEFAULT);
            b.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, CONFIG.peerConnectionTimeout());

            b.handler(new LoggingHandler());
            b.childHandler(new EthereumChannelInitializer(this));

            // Start the server.
            logger.info("Listening for incoming connections, port: [{}] ", port);
            ChannelFuture f = b.bind(port).sync();

            // Wait until the connection is closed.
            f.channel().closeFuture().sync();
            logger.debug("Connection is closed");

        } catch (Exception e) {
            logger.debug("Exception: {} ({})", e.getMessage(), e.getClass().getName());
            throw new Error("Server Disconnected");
        } finally {
            workerGroup.shutdownGracefully();
            // Also release the boss group so the acceptor event loop does not leak.
            bossGroup.shutdownGracefully();
        }
    }