/**
 * <code>optional .hadoop.hdfs.datanode.BlockIdCommandProto blkIdCmd = 8;</code>
 */
public Builder mergeBlkIdCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto value) {
  if (blkIdCmdBuilder_ == null) {
    if (((bitField0_ & 0x00000080) == 0x00000080) &&
        blkIdCmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.getDefaultInstance()) {
      blkIdCmd_ =
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.newBuilder(blkIdCmd_)
          .mergeFrom(value).buildPartial();
    } else {
      blkIdCmd_ = value;
    }
    onChanged();
  } else {
    blkIdCmdBuilder_.mergeFrom(value);
  }
  bitField0_ |= 0x00000080;
  return this;
}
public static BlockIdCommandProto convert(BlockIdCommand cmd) {
  BlockIdCommandProto.Builder builder = BlockIdCommandProto.newBuilder()
      .setBlockPoolId(cmd.getBlockPoolId());
  switch (cmd.getAction()) {
  case DatanodeProtocol.DNA_CACHE:
    builder.setAction(BlockIdCommandProto.Action.CACHE);
    break;
  case DatanodeProtocol.DNA_UNCACHE:
    builder.setAction(BlockIdCommandProto.Action.UNCACHE);
    break;
  default:
    throw new AssertionError("Invalid action");
  }
  long[] blockIds = cmd.getBlockIds();
  for (int i = 0; i < blockIds.length; i++) {
    builder.addBlockIds(blockIds[i]);
  }
  return builder.build();
}
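/*
 * For context, a hedged sketch of the inverse translation (proto -> command).
 * This is not taken from the surrounding source: it assumes BlockIdCommand
 * exposes an (int action, String poolId, long[] blockIds) constructor and that
 * DatanodeProtocol defines the DNA_CACHE / DNA_UNCACHE action codes used above.
 * The generated getters (getBlockIdsCount, getBlockIds, getAction,
 * getBlockPoolId) are the standard protobuf accessors for this message.
 */
public static BlockIdCommand convert(BlockIdCommandProto blkIdCmd) {
  // Copy the repeated blockIds field back into a primitive array.
  long[] blockIds = new long[blkIdCmd.getBlockIdsCount()];
  for (int i = 0; i < blockIds.length; i++) {
    blockIds[i] = blkIdCmd.getBlockIds(i);
  }
  // Map the protobuf enum back to the DatanodeProtocol action code.
  int action;
  switch (blkIdCmd.getAction()) {
  case CACHE:
    action = DatanodeProtocol.DNA_CACHE;
    break;
  case UNCACHE:
    action = DatanodeProtocol.DNA_UNCACHE;
    break;
  default:
    throw new AssertionError("Unknown action type: " + blkIdCmd.getAction());
  }
  return new BlockIdCommand(action, blkIdCmd.getBlockPoolId(), blockIds);
}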
public Builder newBuilderForType() { return newBuilder(); }

public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto prototype) {
  return newBuilder().mergeFrom(prototype);
}

public Builder toBuilder() { return newBuilder(this); }
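/*
 * Hedged usage sketch (not from the surrounding source): the generated
 * newBuilder/toBuilder/build methods are typically chained as below. The
 * "existing" variable, the pool id string, and the block id values are
 * illustrative assumptions; the setters themselves appear in convert() above.
 */
BlockIdCommandProto existing = BlockIdCommandProto.newBuilder()
    .setAction(BlockIdCommandProto.Action.CACHE)
    .setBlockPoolId("BP-example")
    .addBlockIds(1L)
    .build();
// toBuilder() copies the immutable message back into a mutable builder;
// build() then produces a new message with the extra block id appended.
BlockIdCommandProto updated = existing.toBuilder()
    .addBlockIds(2L)
    .build();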