@Override
protected void channelRead0(ChannelHandlerContext ctx, PipelineAckProto ack) throws Exception {
  Status reply = getStatus(ack);
  if (reply != Status.SUCCESS) {
    failed(ctx.channel(), () -> new IOException("Bad response " + reply + " for block " + block
        + " from datanode " + ctx.channel().remoteAddress()));
    return;
  }
  if (PipelineAck.isRestartOOBStatus(reply)) {
    failed(ctx.channel(), () -> new IOException("Restart response " + reply + " for block " + block
        + " from datanode " + ctx.channel().remoteAddress()));
    return;
  }
  if (ack.getSeqno() == HEART_BEAT_SEQNO) {
    return;
  }
  completed(ctx.channel());
}
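// A minimal wiring sketch for the ack handler above (assumptions: Netty 4.x
// codecs, and AckHandler is a hypothetical name for the
// SimpleChannelInboundHandler that contains channelRead0). The varint32 frame
// decoder matches the writeDelimitedTo() framing used in the test below.
import io.netty.channel.Channel;
import io.netty.handler.codec.protobuf.ProtobufDecoder;
import io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto;

void setupAckPipeline(Channel ch, AckHandler ackHandler) {
  ch.pipeline().addLast(
      new ProtobufVarint32FrameDecoder(),                         // strip the varint length prefix
      new ProtobufDecoder(PipelineAckProto.getDefaultInstance()), // decode bytes into PipelineAckProto
      ackHandler);                                                // channelRead0 sees each decoded ack
}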
if (!getFlagList().isEmpty()) {
  size += 1;
  size += com.google.protobuf.CodedOutputStream
      .computeInt32SizeNoTag(dataSize);  // dataSize: packed size of the flag list, computed earlier in getSerializedSize()
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
if (!getFlagList().isEmpty()) {
  size += 1;
  size += io.prestosql.hadoop.$internal.com.google.protobuf.CodedOutputStream
      .computeInt32SizeNoTag(dataSize);  // same tail, from the presto-shaded copy of the generated class
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
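// The two tails above come from the generated getSerializedSize(); a sketch of
// the packed-field computation they depend on (assumption: protobuf 2.x
// generated code for `repeated uint32 flag = 4 [packed = true]`, which is what
// defines the dataSize referenced above):
{
  int dataSize = 0;
  for (int i = 0; i < flag_.size(); i++) {
    dataSize += com.google.protobuf.CodedOutputStream
        .computeUInt32SizeNoTag(flag_.get(i));   // payload bytes per flag
  }
  size += dataSize;
  if (!getFlagList().isEmpty()) {
    size += 1;                                   // one byte for the field-4 tag
    size += com.google.protobuf.CodedOutputStream
        .computeInt32SizeNoTag(dataSize);        // varint length prefix of the packed run
  }
  flagMemoizedSerializedSize = dataSize;         // reused later by writeTo()
}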
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto other) {
  if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto.getDefaultInstance()) return this;
  if (other.hasSeqno()) {
    setSeqno(other.getSeqno());
  }
  // (the generated code also merges the repeated reply and flag lists here; elided in this snippet)
  if (other.hasDownstreamAckTimeNanos()) {
    setDownstreamAckTimeNanos(other.getDownstreamAckTimeNanos());
  }
  this.mergeUnknownFields(other.getUnknownFields());
  return this;
}
@Test
public void TestPipeLineAckCompatibility() throws IOException {
  DataTransferProtos.PipelineAckProto proto = DataTransferProtos
      .PipelineAckProto.newBuilder()
      .setSeqno(0)
      .addReply(Status.CHECKSUM_OK)
      .build();
  DataTransferProtos.PipelineAckProto newProto = DataTransferProtos
      .PipelineAckProto.newBuilder().mergeFrom(proto)
      .addFlag(PipelineAck.combineHeader(PipelineAck.ECN.SUPPORTED,
                                         Status.CHECKSUM_OK))
      .build();

  ByteArrayOutputStream oldAckBytes = new ByteArrayOutputStream();
  proto.writeDelimitedTo(oldAckBytes);
  PipelineAck oldAck = new PipelineAck();
  oldAck.readFields(new ByteArrayInputStream(oldAckBytes.toByteArray()));
  assertEquals(
      PipelineAck.combineHeader(PipelineAck.ECN.DISABLED, Status.CHECKSUM_OK),
      oldAck.getHeaderFlag(0));

  PipelineAck newAck = new PipelineAck();
  ByteArrayOutputStream newAckBytes = new ByteArrayOutputStream();
  newProto.writeDelimitedTo(newAckBytes);
  newAck.readFields(new ByteArrayInputStream(newAckBytes.toByteArray()));
  assertEquals(
      PipelineAck.combineHeader(PipelineAck.ECN.SUPPORTED, Status.CHECKSUM_OK),
      newAck.getHeaderFlag(0));
}
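// A plausible sketch of the fallback the test above exercises (assumption:
// this mirrors PipelineAck.getHeaderFlag; the real implementation may differ).
// When the sender never populated the packed `flag` field (an "old" ack), the
// reader synthesizes a header from the legacy `reply` enum with ECN disabled,
// which is exactly what the first assertEquals checks.
public int getHeaderFlag(int i) {
  if (proto.getFlagCount() > 0) {
    return proto.getFlag(i);                         // new-style combined header
  } else {
    return combineHeader(ECN.DISABLED, proto.getReply(i));  // legacy fallback
  }
}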
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  if (obj == this) {
    return true;
  }
  if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto)) {
    return super.equals(obj);
  }
  org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto other =
      (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto) obj;

  boolean result = true;
  result = result && (hasSeqno() == other.hasSeqno());
  if (hasSeqno()) {
    result = result && (getSeqno() == other.getSeqno());
  }
  result = result && getReplyList().equals(other.getReplyList());
  result = result && (hasDownstreamAckTimeNanos() == other.hasDownstreamAckTimeNanos());
  if (hasDownstreamAckTimeNanos()) {
    result = result && (getDownstreamAckTimeNanos() == other.getDownstreamAckTimeNanos());
  }
  result = result && getFlagList().equals(other.getFlagList());
  result = result && getUnknownFields().equals(other.getUnknownFields());
  return result;
}
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto buildPartial() {
  org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto result =
      new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto(this);
  int from_bitField0_ = bitField0_;
  int to_bitField0_ = 0;
  if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
    to_bitField0_ |= 0x00000001;
  }
  result.seqno_ = seqno_;
  if (((bitField0_ & 0x00000002) == 0x00000002)) {
    reply_ = java.util.Collections.unmodifiableList(reply_);
    bitField0_ = (bitField0_ & ~0x00000002);
  }
  result.reply_ = reply_;
  if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
    to_bitField0_ |= 0x00000002;
  }
  result.downstreamAckTimeNanos_ = downstreamAckTimeNanos_;
  if (((bitField0_ & 0x00000008) == 0x00000008)) {
    flag_ = java.util.Collections.unmodifiableList(flag_);
    bitField0_ = (bitField0_ & ~0x00000008);
  }
  result.flag_ = flag_;
  result.bitField0_ = to_bitField0_;
  onBuilt();
  return result;
}
@java.lang.Override
public int hashCode() {
  if (memoizedHashCode != 0) {
    return memoizedHashCode;
  }
  int hash = 41;
  hash = (19 * hash) + getDescriptorForType().hashCode();
  if (hasSeqno()) {
    hash = (37 * hash) + SEQNO_FIELD_NUMBER;
    hash = (53 * hash) + hashLong(getSeqno());
  }
  if (getReplyCount() > 0) {
    hash = (37 * hash) + REPLY_FIELD_NUMBER;
    hash = (53 * hash) + hashEnumList(getReplyList());
  }
  if (hasDownstreamAckTimeNanos()) {
    hash = (37 * hash) + DOWNSTREAMACKTIMENANOS_FIELD_NUMBER;
    hash = (53 * hash) + hashLong(getDownstreamAckTimeNanos());
  }
  if (getFlagCount() > 0) {
    hash = (37 * hash) + FLAG_FIELD_NUMBER;
    hash = (53 * hash) + getFlagList().hashCode();
  }
  hash = (29 * hash) + getUnknownFields().hashCode();
  memoizedHashCode = hash;
  return hash;
}
public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
  getSerializedSize();
  if (((bitField0_ & 0x00000001) == 0x00000001)) {
    output.writeSInt64(1, seqno_);
  }
  for (int i = 0; i < reply_.size(); i++) {
    output.writeEnum(2, reply_.get(i).getNumber());
  }
  if (((bitField0_ & 0x00000002) == 0x00000002)) {
    output.writeUInt64(3, downstreamAckTimeNanos_);
  }
  if (getFlagList().size() > 0) {
    output.writeRawVarint32(34);
    output.writeRawVarint32(flagMemoizedSerializedSize);
  }
  for (int i = 0; i < flag_.size(); i++) {
    output.writeUInt32NoTag(flag_.get(i));
  }
  getUnknownFields().writeTo(output);
}
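// Note on the constant 34 in writeTo above: it is the precomputed protobuf
// wire tag for the packed `flag` field, i.e. field number 4 with wire type 2
// (length-delimited). A minimal check of that arithmetic:
int flagFieldNumber = 4;
int lengthDelimited = 2;  // protobuf wire type for length-delimited (packed) data
int tag = (flagFieldNumber << 3) | lengthDelimited;
assert tag == 34;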
/**
 * Constructor.
 * @param seqno sequence number
 * @param replies an array of replies
 * @param downstreamAckTimeNanos ack RTT in nanoseconds, 0 if no next DN in pipeline
 */
public PipelineAck(long seqno, int[] replies, long downstreamAckTimeNanos) {
  ArrayList<Status> statusList = Lists.newArrayList();
  ArrayList<Integer> flagList = Lists.newArrayList();
  for (int r : replies) {
    statusList.add(StatusFormat.getStatus(r));
    flagList.add(r);
  }
  proto = PipelineAckProto.newBuilder()
      .setSeqno(seqno)
      .addAllReply(statusList)
      .addAllFlag(flagList)
      .setDownstreamAckTimeNanos(downstreamAckTimeNanos)
      .build();
}
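// A hypothetical usage sketch for the constructor above, built only from
// helpers that appear in these snippets (combineHeader and the ECN enum are
// assumed from PipelineAck as exercised in the compatibility test):
int[] replies = new int[] {
    PipelineAck.combineHeader(PipelineAck.ECN.SUPPORTED, Status.SUCCESS)
};
// Seqno 1024, one SUCCESS reply with ECN support advertised, no downstream RTT.
PipelineAck ack = new PipelineAck(1024L, replies, 0L);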