/**
 * <code>optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;</code>
 */
public Builder mergeReadOpChecksumInfo(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto value) {
  if (readOpChecksumInfoBuilder_ == null) {
    if (((bitField0_ & 0x00000008) == 0x00000008) &&
        readOpChecksumInfo_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.getDefaultInstance()) {
      readOpChecksumInfo_ =
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.newBuilder(readOpChecksumInfo_).mergeFrom(value).buildPartial();
    } else {
      readOpChecksumInfo_ = value;
    }
    onChanged();
  } else {
    readOpChecksumInfoBuilder_.mergeFrom(value);
  }
  bitField0_ |= 0x00000008;
  return this;
}
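For illustration, a minimal sketch of what this merge semantics means in practice: when the field is already set, the incoming sub-message is merged field-by-field; otherwise it simply replaces the current value. Only the generated DataTransferProtos/HdfsProtos classes are assumed; the concrete values are made up.

// Sketch of Builder merge semantics for ReadOpChecksumInfoProto.
// Assumes the generated Hadoop protobuf classes are on the classpath.
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto;

public class MergeSemanticsDemo {
  public static void main(String[] args) {
    // First partial message: carries only a checksum. buildPartial()
    // skips required-field validation on the outer message.
    ReadOpChecksumInfoProto withChecksum = ReadOpChecksumInfoProto.newBuilder()
        .setChecksum(ChecksumProto.newBuilder()
            .setType(ChecksumTypeProto.CHECKSUM_CRC32C)
            .setBytesPerChecksum(512))
        .buildPartial();

    // Second partial message: carries only a chunk offset.
    ReadOpChecksumInfoProto withOffset = ReadOpChecksumInfoProto.newBuilder()
        .setChunkOffset(1024L)
        .buildPartial();

    // Merging combines the set fields of both partial messages,
    // which is exactly what mergeReadOpChecksumInfo() does above.
    ReadOpChecksumInfoProto merged = ReadOpChecksumInfoProto.newBuilder(withChecksum)
        .mergeFrom(withOffset)
        .build();
    System.out.println(merged.getChecksum().getType());  // CHECKSUM_CRC32C
    System.out.println(merged.getChunkOffset());         // 1024
  }
}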
checksumInfo.getChecksum());
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto buildPartial() {
  org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto result =
      new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto(this);
  int from_bitField0_ = bitField0_;
  int to_bitField0_ = 0;
  if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
    to_bitField0_ |= 0x00000001;
  }
  if (checksumBuilder_ == null) {
    result.checksum_ = checksum_;
  } else {
    result.checksum_ = checksumBuilder_.build();
  }
  if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
    to_bitField0_ |= 0x00000002;
  }
  result.chunkOffset_ = chunkOffset_;
  result.bitField0_ = to_bitField0_;
  onBuilt();
  return result;
}
.setReadOpChecksumInfo(ReadOpChecksumInfoProto.newBuilder()
    .setChecksum(DataTransferProtoUtil.toProto(DEFAULT_CHECKSUM))
    .setChunkOffset(0L))
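This fragment sits inside a larger builder chain. A hedged sketch of what a complete success response carrying checksum info could look like on the datanode/test side; DEFAULT_CHECKSUM and the output stream `out` are assumed from the surrounding code, and writeDelimitedTo stands in for the varint-prefixed framing used on the wire:

// Sketch: a full BlockOpResponseProto for a successful OP_READ_BLOCK,
// embedding the checksum algorithm and first-chunk offset.
BlockOpResponseProto response = BlockOpResponseProto.newBuilder()
    .setStatus(Status.SUCCESS)
    .setReadOpChecksumInfo(ReadOpChecksumInfoProto.newBuilder()
        .setChecksum(DataTransferProtoUtil.toProto(DEFAULT_CHECKSUM))
        .setChunkOffset(0L))
    .build();
// Replies are length-prefixed with a varint before the message body.
response.writeDelimitedTo(out);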
status.getReadOpChecksumInfo();
DataChecksum checksum = DataTransferProtoUtil.fromProto(
    checksumInfo.getChecksum());
long firstChunkOffset = checksumInfo.getChunkOffset();
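This is the client-side counterpart: after a successful OP_READ_BLOCK reply, the block reader pulls the checksum algorithm and the offset of the first checksummed chunk out of the response. A minimal sketch of that decode path, assuming an established DataInputStream `in` over the reply channel:

// Sketch: decoding read-op checksum info from a datanode reply.
// `in` is assumed to be a DataInputStream over the reply channel.
BlockOpResponseProto status =
    BlockOpResponseProto.parseFrom(PBHelperClient.vintPrefixed(in));
ReadOpChecksumInfoProto checksumInfo = status.getReadOpChecksumInfo();
DataChecksum checksum = DataTransferProtoUtil.fromProto(
    checksumInfo.getChecksum());
// chunkOffset tells the reader where the first checksummed chunk
// starts relative to the requested offset, so reads that begin
// mid-chunk can still be verified against the stored checksums.
long firstChunkOffset = checksumInfo.getChunkOffset();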
if (hasReadOpChecksumInfo()) {
  result = result && getReadOpChecksumInfo()
      .equals(other.getReadOpChecksumInfo());
}
/**
 * Infer the checksum type for a replica by sending an OP_READ_BLOCK
 * for the first byte of that replica. This is used for compatibility
 * with older HDFS versions which did not include the checksum type in
 * OpBlockChecksumResponseProto.
 *
 * @param lb the located block
 * @param dn the connected datanode
 * @return the inferred checksum type
 * @throws IOException if an error occurs
 */
protected Type inferChecksumTypeByReading(LocatedBlock lb, DatanodeInfo dn)
    throws IOException {
  IOStreamPair pair = connectToDN(dn, dfsClientConf.getSocketTimeout(),
      lb.getBlockToken());

  try {
    new Sender((DataOutputStream) pair.out).readBlock(lb.getBlock(),
        lb.getBlockToken(), clientName, 0, 1, true,
        CachingStrategy.newDefaultStrategy());
    final BlockOpResponseProto reply =
        BlockOpResponseProto.parseFrom(PBHelperClient.vintPrefixed(pair.in));
    String logInfo = "trying to read " + lb.getBlock() +
        " from datanode " + dn;
    DataTransferProtoUtil.checkBlockOpStatus(reply, logInfo);

    return PBHelperClient.convert(
        reply.getReadOpChecksumInfo().getChecksum().getType());
  } finally {
    IOUtilsClient.cleanupWithLogger(LOG, pair.in, pair.out);
  }
}
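A hedged sketch of a typical call site for this method: prefer the checksum type carried in OpBlockChecksumResponseProto, and fall back to one-byte read inference only when talking to an older datanode that omits it. The variable names (checksumData, lb, dn) are illustrative, not the verbatim DFSClient code:

// Illustrative fallback, not the exact DFSClient source: use the
// crcType field when the datanode reported it, otherwise infer the
// type by issuing a one-byte OP_READ_BLOCK via the method above.
DataChecksum.Type ct;
if (checksumData.hasCrcType()) {
  ct = PBHelperClient.convert(checksumData.getCrcType());
} else {
  LOG.debug("Retrieving checksum from an earlier-version DataNode: "
      + "inferring checksum by reading first byte");
  ct = inferChecksumTypeByReading(lb, dn);
}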
/**
 * Infer the checksum type for a replica by sending an OP_READ_BLOCK
 * for the first byte of that replica. This is used for compatibility
 * with older HDFS versions which did not include the checksum type in
 * OpBlockChecksumResponseProto.
 *
 * @param lb the located block
 * @param dn the connected datanode
 * @return the inferred checksum type
 * @throws IOException if an error occurs
 */
private Type inferChecksumTypeByReading(LocatedBlock lb, DatanodeInfo dn)
    throws IOException {
  IOStreamPair pair = connectToDN(dn, dfsClientConf.socketTimeout, lb);

  try {
    DataOutputStream out = new DataOutputStream(
        new BufferedOutputStream(pair.out, HdfsConstants.SMALL_BUFFER_SIZE));
    DataInputStream in = new DataInputStream(pair.in);

    new Sender(out).readBlock(lb.getBlock(), lb.getBlockToken(), clientName,
        0, 1, true, CachingStrategy.newDefaultStrategy());
    final BlockOpResponseProto reply =
        BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
    String logInfo = "trying to read " + lb.getBlock() +
        " from datanode " + dn;
    DataTransferProtoUtil.checkBlockOpStatus(reply, logInfo);

    return PBHelper.convert(reply.getReadOpChecksumInfo().getChecksum().getType());
  } finally {
    IOUtils.cleanup(null, pair.in, pair.out);
  }
}
public Builder clear() {
  super.clear();
  status_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS;
  bitField0_ = (bitField0_ & ~0x00000001);
  firstBadLink_ = "";
  bitField0_ = (bitField0_ & ~0x00000002);
  if (checksumResponseBuilder_ == null) {
    checksumResponse_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.getDefaultInstance();
  } else {
    checksumResponseBuilder_.clear();
  }
  bitField0_ = (bitField0_ & ~0x00000004);
  if (readOpChecksumInfoBuilder_ == null) {
    readOpChecksumInfo_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.getDefaultInstance();
  } else {
    readOpChecksumInfoBuilder_.clear();
  }
  bitField0_ = (bitField0_ & ~0x00000008);
  message_ = "";
  bitField0_ = (bitField0_ & ~0x00000010);
  shortCircuitAccessVersion_ = 0;
  bitField0_ = (bitField0_ & ~0x00000020);
  return this;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  if (obj == this) {
    return true;
  }
  if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto)) {
    return super.equals(obj);
  }
  org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto other =
      (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto) obj;

  boolean result = true;
  result = result && (hasChecksum() == other.hasChecksum());
  if (hasChecksum()) {
    result = result && getChecksum()
        .equals(other.getChecksum());
  }
  result = result && (hasChunkOffset() == other.hasChunkOffset());
  if (hasChunkOffset()) {
    result = result && (getChunkOffset()
        == other.getChunkOffset());
  }
  result = result && getUnknownFields().equals(other.getUnknownFields());
  return result;
}