ReadOpChecksumInfoProto checksumInfo =
    status.getReadOpChecksumInfo();
DataChecksum checksum = DataTransferProtoUtil.fromProto(
    checksumInfo.getChecksum());
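// Hedged sketch, not from the source: DataTransferProtoUtil.fromProto maps the
// wire-level ChecksumProto (algorithm type plus bytesPerChecksum) onto a
// DataChecksum; its effect is roughly equivalent to the two calls below.
DataChecksum fromProtoSketch = DataChecksum.newDataChecksum(
    PBHelperClient.convert(checksumInfo.getChecksum().getType()),
    checksumInfo.getChecksum().getBytesPerChecksum());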
/**
 * Infer the checksum type for a replica by sending an OP_READ_BLOCK
 * for the first byte of that replica. This is used for compatibility
 * with older HDFS versions which did not include the checksum type in
 * OpBlockChecksumResponseProto.
 *
 * @param lb the located block
 * @param dn the connected datanode
 * @return the inferred checksum type
 * @throws IOException if an error occurs
 */
protected Type inferChecksumTypeByReading(LocatedBlock lb, DatanodeInfo dn)
    throws IOException {
  IOStreamPair pair = connectToDN(dn, dfsClientConf.getSocketTimeout(),
      lb.getBlockToken());

  try {
    new Sender((DataOutputStream) pair.out).readBlock(lb.getBlock(),
        lb.getBlockToken(), clientName, 0, 1, true,
        CachingStrategy.newDefaultStrategy());
    final BlockOpResponseProto reply =
        BlockOpResponseProto.parseFrom(PBHelperClient.vintPrefixed(pair.in));
    String logInfo = "trying to read " + lb.getBlock() +
        " from datanode " + dn;
    DataTransferProtoUtil.checkBlockOpStatus(reply, logInfo);

    return PBHelperClient.convert(
        reply.getReadOpChecksumInfo().getChecksum().getType());
  } finally {
    IOUtilsClient.cleanupWithLogger(LOG, pair.in, pair.out);
  }
}
/**
 * Infer the checksum type for a replica by sending an OP_READ_BLOCK
 * for the first byte of that replica. This is used for compatibility
 * with older HDFS versions which did not include the checksum type in
 * OpBlockChecksumResponseProto.
 *
 * @param lb the located block
 * @param dn the connected datanode
 * @return the inferred checksum type
 * @throws IOException if an error occurs
 */
private Type inferChecksumTypeByReading(LocatedBlock lb, DatanodeInfo dn)
    throws IOException {
  IOStreamPair pair = connectToDN(dn, dfsClientConf.socketTimeout, lb);

  try {
    DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
        pair.out, HdfsConstants.SMALL_BUFFER_SIZE));
    DataInputStream in = new DataInputStream(pair.in);

    new Sender(out).readBlock(lb.getBlock(), lb.getBlockToken(), clientName,
        0, 1, true, CachingStrategy.newDefaultStrategy());
    final BlockOpResponseProto reply =
        BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
    String logInfo = "trying to read " + lb.getBlock() +
        " from datanode " + dn;
    DataTransferProtoUtil.checkBlockOpStatus(reply, logInfo);

    return PBHelper.convert(
        reply.getReadOpChecksumInfo().getChecksum().getType());
  } finally {
    IOUtils.cleanup(null, pair.in, pair.out);
  }
}
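// Hedged usage sketch, not from the source: a caller holding a LocatedBlock
// and a chosen DatanodeInfo could resolve the replica's checksum algorithm
// and build a matching DataChecksum. The bytesPerChecksum value (512) is an
// illustrative assumption, and the calling context must handle IOException.
Type ct = inferChecksumTypeByReading(lb, dn);
DataChecksum inferred = DataChecksum.newDataChecksum(ct, 512);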
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  if (obj == this) {
    return true;
  }
  if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto)) {
    return super.equals(obj);
  }
  org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto other =
      (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto) obj;

  boolean result = true;
  result = result && (hasChecksum() == other.hasChecksum());
  if (hasChecksum()) {
    result = result && getChecksum()
        .equals(other.getChecksum());
  }
  result = result && (hasChunkOffset() == other.hasChunkOffset());
  if (hasChunkOffset()) {
    result = result && (getChunkOffset()
        == other.getChunkOffset());
  }
  result = result &&
      getUnknownFields().equals(other.getUnknownFields());
  return result;
}
@java.lang.Override
public int hashCode() {
  if (memoizedHashCode != 0) {
    return memoizedHashCode;
  }
  int hash = 41;
  hash = (19 * hash) + getDescriptorForType().hashCode();
  if (hasChecksum()) {
    hash = (37 * hash) + CHECKSUM_FIELD_NUMBER;
    hash = (53 * hash) + getChecksum().hashCode();
  }
  if (hasChunkOffset()) {
    hash = (37 * hash) + CHUNKOFFSET_FIELD_NUMBER;
    hash = (53 * hash) + hashLong(getChunkOffset());
  }
  hash = (29 * hash) + getUnknownFields().hashCode();
  memoizedHashCode = hash;
  return hash;
}
public final boolean isInitialized() {
  byte isInitialized = memoizedIsInitialized;
  if (isInitialized != -1) return isInitialized == 1;

  if (!hasChecksum()) {
    memoizedIsInitialized = 0;
    return false;
  }
  if (!hasChunkOffset()) {
    memoizedIsInitialized = 0;
    return false;
  }
  if (!getChecksum().isInitialized()) {
    memoizedIsInitialized = 0;
    return false;
  }
  memoizedIsInitialized = 1;
  return true;
}
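// Hedged illustration of the required-field contract enforced above: both
// checksum and chunkOffset are required, so build() on an empty builder fails
// once isInitialized() reports false. The exception class named below is the
// standard proto2 behavior, assumed rather than taken from this source.
try {
  ReadOpChecksumInfoProto.newBuilder().build();
} catch (com.google.protobuf.UninitializedMessageException e) {
  // expected: missing required fields checksum and chunkOffset
}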
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto other) {
  if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.getDefaultInstance()) return this;
  if (other.hasChecksum()) {
    mergeChecksum(other.getChecksum());
  }
  if (other.hasChunkOffset()) {
    setChunkOffset(other.getChunkOffset());
  }
  this.mergeUnknownFields(other.getUnknownFields());
  return this;
}
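// Hedged round-trip sketch (an assumption, not from the source) exercising the
// generated value semantics above: mergeFrom copies both fields into a fresh
// builder, and the resulting message compares equal to the original with a
// matching hash code. ChecksumProto and ChecksumTypeProto come from HdfsProtos.
ChecksumProto cs = ChecksumProto.newBuilder()
    .setType(ChecksumTypeProto.CHECKSUM_CRC32C)
    .setBytesPerChecksum(512)
    .build();
ReadOpChecksumInfoProto a = ReadOpChecksumInfoProto.newBuilder()
    .setChecksum(cs).setChunkOffset(0L).build();
ReadOpChecksumInfoProto b = ReadOpChecksumInfoProto.newBuilder()
    .mergeFrom(a).build();
assert a.equals(b) && a.hashCode() == b.hashCode();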