/**
 * Read from a stream.
 *
 * <p>Deserializes the superclass state first, then this aggregation's single
 * accumulated value as a variable-length long (VLong encoding is compact for
 * non-negative values).
 */
public InternalValueCount(StreamInput in) throws IOException {
    super(in);
    value = in.readVLong();
}
/**
 * Read from a stream.
 *
 * <p>Wire order must mirror the corresponding write: superclass state, then
 * the bucket's document count (VLong), then the sub-aggregations.
 */
protected InternalSingleBucketAggregation(StreamInput in) throws IOException {
    super(in);
    docCount = in.readVLong();
    aggregations = InternalAggregations.readAggregations(in);
}
/**
 * Read from a stream.
 *
 * <p>{@code keyed} is supplied by the caller (it is rendering state, not part
 * of the wire format). The stream carries an optional key string, the document
 * count (VLong), and the sub-aggregations, in that order.
 */
public InternalBucket(StreamInput in, boolean keyed) throws IOException {
    this.keyed = keyed;
    key = in.readOptionalString();
    docCount = in.readVLong();
    aggregations = InternalAggregations.readAggregations(in);
}
/**
 * Read from a stream.
 *
 * <p>Reads, in wire order: superclass state, the value format (named
 * writeable), the running sum (double), and the value count (VLong). The
 * average itself is not serialized; presumably it is derived from sum/count.
 */
public InternalAvg(StreamInput in) throws IOException {
    super(in);
    format = in.readNamedWriteable(DocValueFormat.class);
    sum = in.readDouble();
    count = in.readVLong();
}
/**
 * Read from a stream.
 *
 * <p>{@code format} and {@code keyed} come from the enclosing aggregation
 * (caller-supplied rendering state, not serialized per bucket). The stream
 * carries the bucket key (long), document count (VLong), and sub-aggregations,
 * in that order.
 */
public Bucket(StreamInput in, boolean keyed, DocValueFormat format) throws IOException {
    this.format = format;
    this.keyed = keyed;
    key = in.readLong();
    docCount = in.readVLong();
    aggregations = InternalAggregations.readAggregations(in);
}
/**
 * Read from a stream.
 *
 * <p>Wire order: the snapshot identity, the start time (VLong — non-negative
 * epoch millis, presumably; confirm against the writer), and the repository
 * state id as a full signed long.
 */
public Entry(StreamInput in) throws IOException {
    this.snapshot = new Snapshot(in);
    this.startTime = in.readVLong();
    this.repositoryStateId = in.readLong();
}
/**
 * Read from a stream.
 *
 * <p>Wire order: optional key string, document count (VLong), then the
 * sub-aggregations.
 */
public InternalBucket(StreamInput in) throws IOException {
    key = in.readOptionalString();
    docCount = in.readVLong();
    aggregations = InternalAggregations.readAggregations(in);
}
/**
 * Read from a stream.
 *
 * <p>Reads superclass state, the timestamp (VLong), and then an optional
 * cluster-health status.
 */
@Override
public void readFrom(StreamInput in) throws IOException {
    super.readFrom(in);
    timestamp = in.readVLong();
    // it may be that the master switched on us while doing the operation. In
    // this case the status may be null.
    status = in.readOptionalWriteable(ClusterHealthStatus::readFrom);
}
/**
 * Read from a stream.
 *
 * <p>{@code format} is caller-supplied rendering state, not serialized per
 * bucket. The stream carries the bucket key (long), document count (VLong),
 * and sub-aggregations, in that order.
 */
public Bucket(StreamInput in, DocValueFormat format) throws IOException {
    this.format = format;
    key = in.readLong();
    docCount = in.readVLong();
    aggregations = InternalAggregations.readAggregations(in);
}
/**
 * Read from a stream.
 *
 * <p>Wire order: the geohash cell encoded as a long, the document count
 * (VLong), then the sub-aggregations.
 */
private Bucket(StreamInput in) throws IOException {
    geohashAsLong = in.readLong();
    docCount = in.readVLong();
    aggregations = InternalAggregations.readAggregations(in);
}
/**
 * Read from a stream.
 *
 * <p>Wire order: the target allocation id (string), the primary term (VLong),
 * then the wrapped request's own state. Note there is no {@code super.readFrom}
 * call here — assumes the superclass serializes nothing, or the writer matches;
 * verify against the corresponding writeTo.
 */
@Override
public void readFrom(StreamInput in) throws IOException {
    targetAllocationID = in.readString();
    primaryTerm = in.readVLong();
    request.readFrom(in);
}
/**
 * Read from a stream.
 *
 * <p>Wire order: the rounding instance, its rough duration estimate in millis
 * (VLong), the array of inner intervals, and the unit abbreviation string.
 */
public RoundingInfo(StreamInput in) throws IOException {
    rounding = Rounding.Streams.read(in);
    roughEstimateDurationMillis = in.readVLong();
    innerIntervals = in.readIntArray();
    unitAbbreviation = in.readString();
}
/**
 * Read from a stream.
 *
 * <p>The wire format changed in 6.2.0: older nodes send only a non-negative
 * byte count (VLong, unit implicitly BYTES); 6.2.0+ nodes send a signed size
 * (ZLong) followed by an explicit unit. The version check keeps this readable
 * from both.
 */
public ByteSizeValue(StreamInput in) throws IOException {
    if (in.getVersion().before(Version.V_6_2_0)) {
        // pre-6.2.0 format: raw byte count only
        size = in.readVLong();
        unit = ByteSizeUnit.BYTES;
    } else {
        // 6.2.0+ format: zig-zag-encoded size plus its unit
        size = in.readZLong();
        unit = ByteSizeUnit.readFrom(in);
    }
}
/**
 * Read from a stream.
 *
 * <p>Wire order: superclass state, the value format (named writeable), the
 * count (VLong), then min, max and sum as doubles. The average is not
 * serialized; presumably derived from sum/count.
 */
public InternalStats(StreamInput in) throws IOException {
    super(in);
    format = in.readNamedWriteable(DocValueFormat.class);
    count = in.readVLong();
    min = in.readDouble();
    max = in.readDouble();
    sum = in.readDouble();
}
/**
 * Read from a stream.
 *
 * <p>Wire order: superclass state, the doc-count error (ZLong — may be
 * negative), the value format, the shard size, the show-error flag, the
 * other-doc count (VLong), and finally the bucket list. Note the bucket-list
 * read closes over {@code format} and {@code showTermDocCountError}, so those
 * two fields must be deserialized before the buckets — do not reorder.
 */
protected InternalMappedTerms(StreamInput in, Bucket.Reader<B> bucketReader) throws IOException {
    super(in);
    docCountError = in.readZLong();
    format = in.readNamedWriteable(DocValueFormat.class);
    shardSize = readSize(in);
    showTermDocCountError = in.readBoolean();
    otherDocCount = in.readVLong();
    buckets = in.readList(stream -> bucketReader.read(stream, format, showTermDocCountError));
}