org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto Java Examples
The following examples show how to use org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto, the protobuf message behind the HDFS OP_WRITE_BLOCK data-transfer operation. Each example notes the project and source file it comes from.
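Before the examples, a quick orientation: OpWriteBlockProto is a generated protocol-buffer message, so a write-block request is assembled through its Builder field by field and then serialized. The sketch below is a minimal illustration only, not code from any of the projects; buildMinimalWriteBlock is a hypothetical helper, and it assumes the caller already has a ClientOperationHeaderProto and a ChecksumProto (Examples #5 and #8 show how HDFS itself builds those).

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto;

// Hypothetical helper: the smallest plausible OP_WRITE_BLOCK request.
// header and checksum are assumed to be built elsewhere.
static OpWriteBlockProto buildMinimalWriteBlock(ClientOperationHeaderProto header,
    ChecksumProto checksum) {
  return OpWriteBlockProto.newBuilder()
      .setHeader(header)    // carries the block, access token and client name
      .setStage(OpWriteBlockProto.BlockConstructionStage.PIPELINE_SETUP_CREATE)
      .setPipelineSize(1)   // a single-datanode pipeline
      .setMinBytesRcvd(0)
      .setMaxBytesRcvd(0)
      .setLatestGenerationStamp(0)
      .setRequestedChecksum(checksum)
      .build();
}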
Example #1
Source File: FanOutOneBlockAsyncDFSOutputHelper.java From hbase with Apache License 2.0
private static void initialize(Configuration conf, Channel channel, DatanodeInfo dnInfo,
    StorageType storageType, OpWriteBlockProto.Builder writeBlockProtoBuilder, int timeoutMs,
    DFSClient client, Token<BlockTokenIdentifier> accessToken, Promise<Channel> promise)
    throws IOException {
  Promise<Void> saslPromise = channel.eventLoop().newPromise();
  trySaslNegotiate(conf, channel, dnInfo, timeoutMs, client, accessToken, saslPromise);
  saslPromise.addListener(new FutureListener<Void>() {

    @Override
    public void operationComplete(Future<Void> future) throws Exception {
      if (future.isSuccess()) {
        // setup response processing pipeline first, then send request.
        processWriteBlockResponse(channel, dnInfo, promise, timeoutMs);
        requestWriteBlock(channel, storageType, writeBlockProtoBuilder);
      } else {
        promise.tryFailure(future.cause());
      }
    }
  });
}
Example #2
Source File: Receiver.java From hadoop with Apache License 2.0
/** Receive OP_WRITE_BLOCK */
private void opWriteBlock(DataInputStream in) throws IOException {
  // The request proto arrives varint-length-prefixed on the wire.
  final OpWriteBlockProto proto = OpWriteBlockProto.parseFrom(vintPrefixed(in));
  final DatanodeInfo[] targets = PBHelper.convert(proto.getTargetsList());
  TraceScope traceScope = continueTraceSpan(proto.getHeader(),
      proto.getClass().getSimpleName());
  try {
    writeBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
        PBHelper.convertStorageType(proto.getStorageType()),
        PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
        proto.getHeader().getClientName(),
        targets,
        PBHelper.convertStorageTypes(proto.getTargetStorageTypesList(), targets.length),
        PBHelper.convert(proto.getSource()),
        fromProto(proto.getStage()),
        proto.getPipelineSize(),
        proto.getMinBytesRcvd(), proto.getMaxBytesRcvd(),
        proto.getLatestGenerationStamp(),
        fromProto(proto.getRequestedChecksum()),
        // Optional fields fall back to defaults when absent on the wire.
        (proto.hasCachingStrategy() ?
            getCachingStrategy(proto.getCachingStrategy()) :
            CachingStrategy.newDefaultStrategy()),
        (proto.hasAllowLazyPersist() ? proto.getAllowLazyPersist() : false),
        (proto.hasPinning() ? proto.getPinning() : false),
        (PBHelper.convertBooleanList(proto.getTargetPinningsList())));
  } finally {
    if (traceScope != null) traceScope.close();
  }
}
Example #3
Source File: Receiver.java From big-c with Apache License 2.0
/** Receive OP_WRITE_BLOCK */
private void opWriteBlock(DataInputStream in) throws IOException {
  // The request proto arrives varint-length-prefixed on the wire.
  final OpWriteBlockProto proto = OpWriteBlockProto.parseFrom(vintPrefixed(in));
  final DatanodeInfo[] targets = PBHelper.convert(proto.getTargetsList());
  TraceScope traceScope = continueTraceSpan(proto.getHeader(),
      proto.getClass().getSimpleName());
  try {
    writeBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
        PBHelper.convertStorageType(proto.getStorageType()),
        PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
        proto.getHeader().getClientName(),
        targets,
        PBHelper.convertStorageTypes(proto.getTargetStorageTypesList(), targets.length),
        PBHelper.convert(proto.getSource()),
        fromProto(proto.getStage()),
        proto.getPipelineSize(),
        proto.getMinBytesRcvd(), proto.getMaxBytesRcvd(),
        proto.getLatestGenerationStamp(),
        fromProto(proto.getRequestedChecksum()),
        // Optional fields fall back to defaults when absent on the wire.
        (proto.hasCachingStrategy() ?
            getCachingStrategy(proto.getCachingStrategy()) :
            CachingStrategy.newDefaultStrategy()),
        (proto.hasAllowLazyPersist() ? proto.getAllowLazyPersist() : false),
        (proto.hasPinning() ? proto.getPinning() : false),
        (PBHelper.convertBooleanList(proto.getTargetPinningsList())));
  } finally {
    if (traceScope != null) traceScope.close();
  }
}
Example #4
Source File: FanOutOneBlockAsyncDFSOutputHelper.java From hbase with Apache License 2.0
private static void requestWriteBlock(Channel channel, StorageType storageType,
    OpWriteBlockProto.Builder writeBlockProtoBuilder) throws IOException {
  OpWriteBlockProto proto =
      writeBlockProtoBuilder.setStorageType(PBHelperClient.convertStorageType(storageType)).build();
  int protoLen = proto.getSerializedSize();
  // 3 bytes = 2-byte transfer version + 1-byte opcode; the proto itself is
  // written varint-length-delimited.
  ByteBuf buffer =
      channel.alloc().buffer(3 + CodedOutputStream.computeRawVarint32Size(protoLen) + protoLen);
  buffer.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
  buffer.writeByte(Op.WRITE_BLOCK.code);
  proto.writeDelimitedTo(new ByteBufOutputStream(buffer));
  channel.writeAndFlush(buffer);
}
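The 3 bytes reserved at the front of the buffer are the two-byte data-transfer protocol version plus the one-byte WRITE_BLOCK opcode; writeDelimitedTo then appends a varint length prefix followed by the serialized proto. A peer can take such a frame apart with the generated delimited parser. The sketch below is illustrative only (readWriteBlockFrame is a hypothetical helper); the real HDFS Receiver, shown in Examples #2 and #3, uses its own vintPrefixed utility instead.

import java.io.DataInputStream;
import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto;

// Hypothetical helper: reads back one frame as written by requestWriteBlock above.
static OpWriteBlockProto readWriteBlockFrame(DataInputStream in) throws IOException {
  short version = in.readShort(); // should equal DataTransferProtocol.DATA_TRANSFER_VERSION
  byte opcode = in.readByte();    // should equal Op.WRITE_BLOCK.code
  // parseDelimitedFrom consumes the varint length prefix, then the message body.
  return OpWriteBlockProto.parseDelimitedFrom(in);
}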
Example #5
Source File: Sender.java From hadoop with Apache License 2.0
@Override
public void writeBlock(final ExtendedBlock blk,
    final StorageType storageType,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final StorageType[] targetStorageTypes,
    final DatanodeInfo source,
    final BlockConstructionStage stage,
    final int pipelineSize,
    final long minBytesRcvd,
    final long maxBytesRcvd,
    final long latestGenerationStamp,
    DataChecksum requestedChecksum,
    final CachingStrategy cachingStrategy,
    final boolean allowLazyPersist,
    final boolean pinning,
    final boolean[] targetPinnings) throws IOException {
  ClientOperationHeaderProto header = DataTransferProtoUtil.buildClientHeader(
      blk, clientName, blockToken);
  ChecksumProto checksumProto =
      DataTransferProtoUtil.toProto(requestedChecksum);
  OpWriteBlockProto.Builder proto = OpWriteBlockProto.newBuilder()
      .setHeader(header)
      .setStorageType(PBHelper.convertStorageType(storageType))
      .addAllTargets(PBHelper.convert(targets, 1))
      .addAllTargetStorageTypes(PBHelper.convertStorageTypes(targetStorageTypes, 1))
      .setStage(toProto(stage))
      .setPipelineSize(pipelineSize)
      .setMinBytesRcvd(minBytesRcvd)
      .setMaxBytesRcvd(maxBytesRcvd)
      .setLatestGenerationStamp(latestGenerationStamp)
      .setRequestedChecksum(checksumProto)
      .setCachingStrategy(getCachingStrategy(cachingStrategy))
      .setAllowLazyPersist(allowLazyPersist)
      .setPinning(pinning)
      .addAllTargetPinnings(PBHelper.convert(targetPinnings, 1));

  if (source != null) {
    proto.setSource(PBHelper.convertDatanodeInfo(source));
  }

  send(out, Op.WRITE_BLOCK, proto.build());
}
Example #6
Source File: DataTransferProtoUtil.java From hadoop with Apache License 2.0
static BlockConstructionStage fromProto(
    OpWriteBlockProto.BlockConstructionStage stage) {
  return BlockConstructionStage.valueOf(stage.name());
}
Example #7
Source File: DataTransferProtoUtil.java From hadoop with Apache License 2.0
static OpWriteBlockProto.BlockConstructionStage toProto(
    BlockConstructionStage stage) {
  return OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name());
}
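This pair of converters (duplicated for big-c in Examples #9 and #10) works only because the wire enum OpWriteBlockProto.BlockConstructionStage and the internal BlockConstructionStage enum declare identical constant names; valueOf(name()) throws IllegalArgumentException for any name missing on the other side. A minimal round-trip check, assuming the PIPELINE_SETUP_CREATE constant from the HDFS enum:

import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto;

// Round-trip through the wire enum and back; relies on matching constant names.
public static void main(String[] args) {
  BlockConstructionStage stage = BlockConstructionStage.PIPELINE_SETUP_CREATE;
  OpWriteBlockProto.BlockConstructionStage wire =
      OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name());
  assert BlockConstructionStage.valueOf(wire.name()) == stage;
  System.out.println(stage + " <-> " + wire);
}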
Example #8
Source File: Sender.java From big-c with Apache License 2.0
@Override
public void writeBlock(final ExtendedBlock blk,
    final StorageType storageType,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final StorageType[] targetStorageTypes,
    final DatanodeInfo source,
    final BlockConstructionStage stage,
    final int pipelineSize,
    final long minBytesRcvd,
    final long maxBytesRcvd,
    final long latestGenerationStamp,
    DataChecksum requestedChecksum,
    final CachingStrategy cachingStrategy,
    final boolean allowLazyPersist,
    final boolean pinning,
    final boolean[] targetPinnings) throws IOException {
  ClientOperationHeaderProto header = DataTransferProtoUtil.buildClientHeader(
      blk, clientName, blockToken);
  ChecksumProto checksumProto =
      DataTransferProtoUtil.toProto(requestedChecksum);
  OpWriteBlockProto.Builder proto = OpWriteBlockProto.newBuilder()
      .setHeader(header)
      .setStorageType(PBHelper.convertStorageType(storageType))
      .addAllTargets(PBHelper.convert(targets, 1))
      .addAllTargetStorageTypes(PBHelper.convertStorageTypes(targetStorageTypes, 1))
      .setStage(toProto(stage))
      .setPipelineSize(pipelineSize)
      .setMinBytesRcvd(minBytesRcvd)
      .setMaxBytesRcvd(maxBytesRcvd)
      .setLatestGenerationStamp(latestGenerationStamp)
      .setRequestedChecksum(checksumProto)
      .setCachingStrategy(getCachingStrategy(cachingStrategy))
      .setAllowLazyPersist(allowLazyPersist)
      .setPinning(pinning)
      .addAllTargetPinnings(PBHelper.convert(targetPinnings, 1));

  if (source != null) {
    proto.setSource(PBHelper.convertDatanodeInfo(source));
  }

  send(out, Op.WRITE_BLOCK, proto.build());
}
Example #9
Source File: DataTransferProtoUtil.java From big-c with Apache License 2.0
static BlockConstructionStage fromProto(
    OpWriteBlockProto.BlockConstructionStage stage) {
  return BlockConstructionStage.valueOf(stage.name());
}
Example #10
Source File: DataTransferProtoUtil.java From big-c with Apache License 2.0
static OpWriteBlockProto.BlockConstructionStage toProto(
    BlockConstructionStage stage) {
  return OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name());
}
Example #11
Source File: FanOutOneBlockAsyncDFSOutputHelper.java From hbase with Apache License 2.0
private static List<Future<Channel>> connectToDataNodes(Configuration conf, DFSClient client,
    String clientName, LocatedBlock locatedBlock, long maxBytesRcvd, long latestGS,
    BlockConstructionStage stage, DataChecksum summer, EventLoopGroup eventLoopGroup,
    Class<? extends Channel> channelClass) {
  StorageType[] storageTypes = locatedBlock.getStorageTypes();
  DatanodeInfo[] datanodeInfos = locatedBlock.getLocations();
  boolean connectToDnViaHostname =
      conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME, DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
  int timeoutMs = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, READ_TIMEOUT);
  ExtendedBlock blockCopy = new ExtendedBlock(locatedBlock.getBlock());
  blockCopy.setNumBytes(locatedBlock.getBlockSize());
  ClientOperationHeaderProto header = ClientOperationHeaderProto.newBuilder()
      .setBaseHeader(BaseHeaderProto.newBuilder().setBlock(PBHelperClient.convert(blockCopy))
          .setToken(PBHelperClient.convert(locatedBlock.getBlockToken())))
      .setClientName(clientName).build();
  ChecksumProto checksumProto = DataTransferProtoUtil.toProto(summer);
  OpWriteBlockProto.Builder writeBlockProtoBuilder = OpWriteBlockProto.newBuilder()
      .setHeader(header).setStage(OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name()))
      .setPipelineSize(1).setMinBytesRcvd(locatedBlock.getBlock().getNumBytes())
      .setMaxBytesRcvd(maxBytesRcvd).setLatestGenerationStamp(latestGS)
      .setRequestedChecksum(checksumProto)
      .setCachingStrategy(CachingStrategyProto.newBuilder().setDropBehind(true).build());
  List<Future<Channel>> futureList = new ArrayList<>(datanodeInfos.length);
  for (int i = 0; i < datanodeInfos.length; i++) {
    DatanodeInfo dnInfo = datanodeInfos[i];
    StorageType storageType = storageTypes[i];
    Promise<Channel> promise = eventLoopGroup.next().newPromise();
    futureList.add(promise);
    String dnAddr = dnInfo.getXferAddr(connectToDnViaHostname);
    new Bootstrap().group(eventLoopGroup).channel(channelClass)
        .option(CONNECT_TIMEOUT_MILLIS, timeoutMs).handler(new ChannelInitializer<Channel>() {

          @Override
          protected void initChannel(Channel ch) throws Exception {
            // we need to get the remote address of the channel so we can only move on after
            // channel connected. Leave an empty implementation here because netty does not allow
            // a null handler.
          }
        }).connect(NetUtils.createSocketAddr(dnAddr)).addListener(new ChannelFutureListener() {

          @Override
          public void operationComplete(ChannelFuture future) throws Exception {
            if (future.isSuccess()) {
              initialize(conf, future.channel(), dnInfo, storageType, writeBlockProtoBuilder,
                  timeoutMs, client, locatedBlock.getBlockToken(), promise);
            } else {
              promise.tryFailure(future.cause());
            }
          }
        });
  }
  return futureList;
}