/**
 * Read a {@link TimeValue} from the stream.
 *
 * @return the time value (duration and unit) read from the stream
 * @throws IOException if an I/O exception occurs, or if the stream carries an unknown time-unit byte
 */
public TimeValue readTimeValue() throws IOException {
    long duration = readZLong();
    final byte unitByte = readByte();
    TimeUnit timeUnit = BYTE_TIME_UNIT_MAP.get(unitByte);
    if (timeUnit == null) {
        // Guard against a corrupted or incompatible stream: Map#get returns null for an
        // unmapped byte, which would otherwise surface later as an unhelpful NullPointerException.
        throw new IOException("failed to read TimeValue; unknown time unit byte [" + unitByte + "]");
    }
    return new TimeValue(duration, timeUnit);
}
@Override public void readFrom(final StreamInput in) throws IOException { // before 6.0.0 we received an empty response so we have to maintain that if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) { localCheckpoint = in.readZLong(); } else { localCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; } }
/**
 * Reads checkpoint state (local/global checkpoint, in-sync and tracked flags) from the stream.
 */
public CheckpointState(StreamInput in) throws IOException {
    this.localCheckpoint = in.readZLong();
    this.globalCheckpoint = in.readZLong();
    this.inSync = in.readBoolean();
    if (in.getVersion().before(Version.V_6_3_0)) {
        // Older nodes did not serialize the tracked flag. Every in-sync shard copy is also
        // tracked (see invariant), and that held in earlier ES versions too. Non in-sync
        // copies might be tracked or not, but since this state is only serialized during
        // relocation hand-off — after which replica recoveries cannot complete anymore
        // (they cannot move from in-sync == false to in-sync == true) — we can safely treat
        // non in-sync copies as untracked: they will go through a fresh recovery against the
        // new primary and become tracked again there before being marked in-sync.
        this.tracked = inSync;
    } else {
        this.tracked = in.readBoolean();
    }
}
@Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) { localCheckpoint = in.readZLong(); } else { // 5.x used to read empty responses, which don't really read anything off the stream, so just do nothing. localCheckpoint = SequenceNumbers.PRE_60_NODE_CHECKPOINT; } if (in.getVersion().onOrAfter(Version.V_6_0_0_rc1)) { globalCheckpoint = in.readZLong(); } else { globalCheckpoint = SequenceNumbers.PRE_60_NODE_CHECKPOINT; } }
@Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) { globalCheckpoint = in.readZLong(); } else { globalCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; } if (in.getVersion().onOrAfter(Version.V_6_5_0)) { maxSeqNoOfUpdatesOrDeletes = in.readZLong(); } else { // UNASSIGNED_SEQ_NO (-2) means uninitialized, and replicas will disable // optimization using seq_no if its max_seq_no_of_updates is still uninitialized maxSeqNoOfUpdatesOrDeletes = SequenceNumbers.UNASSIGNED_SEQ_NO; } }
@Override
public void readFrom(StreamInput in) throws IOException {
    super.readFrom(in);
    // Item responses are length-prefixed on the wire.
    final int itemCount = in.readVInt();
    responses = new BulkItemResponse[itemCount];
    for (int slot = 0; slot < itemCount; slot++) {
        responses[slot] = BulkItemResponse.readBulkItem(in);
    }
    tookInMillis = in.readVLong();
    ingestTookInMillis = in.readZLong();
}
/**
 * Reads a byte-size value from the stream, accounting for the pre-6.2.0 wire format.
 */
public ByteSizeValue(StreamInput in) throws IOException {
    if (in.getVersion().onOrAfter(Version.V_6_2_0)) {
        size = in.readZLong();
        unit = ByteSizeUnit.readFrom(in);
    } else {
        // Streams from older nodes carry a raw byte count with no unit marker.
        size = in.readVLong();
        unit = ByteSizeUnit.BYTES;
    }
}
@Override
public void readFrom(final StreamInput in) throws IOException {
    final Version version = in.getVersion();
    if (version.equals(Version.V_6_0_0)) {
        /*
         * Resync replication request serialization was broken in 6.0.0 due to the elements of
         * the stream not being prefixed with a byte indicating the type of the operation.
         */
        throw new IllegalStateException("resync replication request serialization is broken in 6.0.0");
    }
    super.readFrom(in);
    trimAboveSeqNo = version.onOrAfter(Version.V_6_4_0)
        ? in.readZLong()
        : SequenceNumbers.UNASSIGNED_SEQ_NO;
    maxSeenAutoIdTimestampOnPrimary = version.onOrAfter(Version.V_6_5_0)
        ? in.readZLong()
        : IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP;
    operations = in.readArray(Translog.Operation::readOperation, Translog.Operation[]::new);
}
@Override
public void readFrom(StreamInput in) throws IOException {
    super.readFrom(in);
    recoveryId = in.readLong();
    shardId = ShardId.readShardId(in);
    // Nodes before 6.0.0-alpha1 did not serialize a global checkpoint.
    final boolean checkpointOnWire = in.getVersion().onOrAfter(Version.V_6_0_0_alpha1);
    globalCheckpoint = checkpointOnWire ? in.readZLong() : SequenceNumbers.UNASSIGNED_SEQ_NO;
}
@Override
public void readFrom(StreamInput in) throws IOException {
    super.readFrom(in);
    shardId = ShardId.readShardId(in);
    type = in.readString();
    id = in.readString();
    version = in.readZLong();
    // seq_no and primary term were added to the response in 6.0.0-alpha1.
    if (in.getVersion().before(Version.V_6_0_0_alpha1)) {
        seqNo = UNASSIGNED_SEQ_NO;
        primaryTerm = UNASSIGNED_PRIMARY_TERM;
    } else {
        seqNo = in.readZLong();
        primaryTerm = in.readVLong();
    }
    forcedRefresh = in.readBoolean();
    result = Result.readFrom(in);
}
@Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); recoveryId = in.readLong(); shardId = ShardId.readShardId(in); operations = Translog.readOperations(in, "recovery"); totalTranslogOps = in.readVInt(); if (in.getVersion().onOrAfter(Version.V_6_5_0)) { maxSeenAutoIdTimestampOnPrimary = in.readZLong(); } else { maxSeenAutoIdTimestampOnPrimary = IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP; } if (in.getVersion().onOrAfter(Version.V_6_5_0)) { maxSeqNoOfUpdatesOrDeletesOnPrimary = in.readZLong(); } else { // UNASSIGNED_SEQ_NO means uninitialized and replica won't enable optimization using seq_no maxSeqNoOfUpdatesOrDeletesOnPrimary = SequenceNumbers.UNASSIGNED_SEQ_NO; } }
/**
 * Read from a stream.
 */
protected InternalMappedTerms(StreamInput in, Bucket.Reader<B> bucketReader) throws IOException {
    super(in);
    docCountError = in.readZLong();
    format = in.readNamedWriteable(DocValueFormat.class);
    shardSize = readSize(in);
    showTermDocCountError = in.readBoolean();
    otherDocCount = in.readVLong();
    // Each bucket is deserialized with the format and error flag read above.
    buckets = in.readList(bucketStream -> bucketReader.read(bucketStream, format, showTermDocCountError));
}
id = in.readString(); if (in.getVersion().onOrAfter(Version.V_6_6_0)) { seqNo = in.readZLong(); primaryTerm = in.readVLong(); } else {
hasProfileResults = profileShardResults != null; if (in.getVersion().onOrAfter(Version.V_6_0_0_beta1)) { serviceTimeEWMA = in.readZLong(); nodeQueueSize = in.readInt(); } else {
@Override
public void readFrom(StreamInput in) throws IOException {
    super.readFrom(in);
    type = in.readString();
    id = in.readString();
    routing = in.readOptionalString();
    parent = in.readOptionalString();
    version = in.readLong();
    versionType = VersionType.fromValue(in.readByte());
    // Compare-and-set on seq_no/primary term (optimistic concurrency) arrived in 6.6.0.
    if (in.getVersion().before(Version.V_6_6_0)) {
        ifSeqNo = UNASSIGNED_SEQ_NO;
        ifPrimaryTerm = UNASSIGNED_PRIMARY_TERM;
    } else {
        ifSeqNo = in.readZLong();
        ifPrimaryTerm = in.readVLong();
    }
}
/** * Read from a stream. */ public Failure(StreamInput in) throws IOException { index = in.readString(); type = in.readString(); id = in.readOptionalString(); cause = in.readException(); status = ExceptionsHelper.status(cause); if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) { seqNo = in.readZLong(); } else { seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; } if (supportsAbortedFlag(in.getVersion())) { aborted = in.readBoolean(); } else { aborted = false; } }
/**
 * Read a {@link TimeValue} from the stream: a zig-zag-encoded duration followed by a unit byte.
 */
public TimeValue readTimeValue() throws IOException {
    final long amount = readZLong();
    final TimeUnit unit = BYTE_TIME_UNIT_MAP.get(readByte());
    return new TimeValue(amount, unit);
}
ifSeqNo = in.readZLong(); ifPrimaryTerm = in.readVLong(); } else {
/**
 * Deserialization constructor: reads the shared terms-aggregation state, then the bucket list.
 */
protected InternalMappedTerms(StreamInput in, Bucket.Reader<B> bucketReader) throws IOException {
    super(in);
    this.docCountError = in.readZLong();
    this.format = in.readNamedWriteable(DocValueFormat.class);
    this.shardSize = readSize(in);
    this.showTermDocCountError = in.readBoolean();
    this.otherDocCount = in.readVLong();
    this.buckets = in.readList(s -> bucketReader.read(s, format, showTermDocCountError));
}