/**
 * Read from a stream.
 */
public Bucket(StreamInput in, DocValueFormat format, boolean showDocCountError) throws IOException {
    super(in, format, showDocCountError);
    termBytes = in.readBytesRef();
}
        return readDoubleArray();
    case 21:
        return readBytesRef();
    case 22:
        return readGeoPoint();
public BytesRef readBytesRef() throws IOException {
    int length = readArraySize();
    return readBytesRef(length);
}
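For context, a minimal round-trip sketch (not taken from the listing above) of how readBytesRef() is typically paired with the matching writeBytesRef call; it assumes BytesStreamOutput and BytesReference#streamInput behave as in recent Elasticsearch sources:

import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;

public class BytesRefRoundTripExample {
    public static void main(String[] args) throws Exception {
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            // writeBytesRef emits a length prefix followed by the raw bytes,
            // which is what readBytesRef() above expects via readArraySize().
            out.writeBytesRef(new BytesRef("term-bytes"));
            try (StreamInput in = out.bytes().streamInput()) {
                BytesRef read = in.readBytesRef();
                System.out.println(read.utf8ToString()); // prints "term-bytes"
            }
        }
    }
}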
/**
 * Read from a stream.
 */
public Bucket(StreamInput in, long subsetSize, long supersetSize, DocValueFormat format) throws IOException {
    super(in, subsetSize, supersetSize, format);
    termBytes = in.readBytesRef();
    subsetDf = in.readVLong();
    supersetDf = in.readVLong();
    score = in.readDouble();
    aggregations = InternalAggregations.readAggregations(in);
}
/**
 * Read from a stream.
 */
public TypeQueryBuilder(StreamInput in) throws IOException {
    super(in);
    if (in.getVersion().onOrAfter(Version.V_6_3_0)) {
        type = in.readString();
    } else {
        type = in.readBytesRef().utf8ToString();
    }
}
/**
 * Read from a stream.
 */
public StoreFileMetaData(StreamInput in) throws IOException {
    name = in.readString();
    length = in.readVLong();
    checksum = in.readString();
    try {
        writtenBy = Version.parse(in.readString());
    } catch (ParseException e) {
        throw new AssertionError(e);
    }
    hash = in.readBytesRef();
}
private static Bucket createFromStream(StreamInput in, DocValueFormat format, boolean keyed) throws IOException {
    String key = in.getVersion().onOrAfter(Version.V_6_4_0)
        ? in.readString()
        : in.readOptionalString();
    BytesRef from = in.readBoolean() ? in.readBytesRef() : null;
    BytesRef to = in.readBoolean() ? in.readBytesRef() : null;
    long docCount = in.readLong();
    InternalAggregations aggregations = InternalAggregations.readAggregations(in);
    return new Bucket(format, keyed, key, from, to, docCount, aggregations);
}
@Override
public void readFrom(StreamInput in) throws IOException {
    int size = in.readVInt();
    termStatistics = HppcMaps.newMap(size);
    for (int i = 0; i < size; i++) {
        Term term = new Term(in.readString(), in.readBytesRef());
        TermStatistics stats = new TermStatistics(in.readBytesRef(), in.readVLong(),
            DfsSearchResult.subOne(in.readVLong()));
        termStatistics.put(term, stats);
    }
    fieldStatistics = DfsSearchResult.readFieldStats(in);
    maxDoc = in.readVLong();
}
@Override
public void readFrom(StreamInput in) throws IOException {
    super.readFrom(in);
    requestId = in.readLong();
    int termsSize = in.readVInt();
    if (termsSize == 0) {
        terms = EMPTY_TERMS;
    } else {
        terms = new Term[termsSize];
        for (int i = 0; i < terms.length; i++) {
            terms[i] = new Term(in.readString(), in.readBytesRef());
        }
    }
    this.termStatistics = readTermStats(in, terms);
    readFieldStats(in, fieldStatistics);
    maxDoc = in.readVInt();
}
includeValues = new TreeSet<>();
for (int i = 0; i < size; i++) {
    includeValues.add(in.readBytesRef());
}
excludeValues = new TreeSet<>();
for (int i = 0; i < size; i++) {
    excludeValues.add(in.readBytesRef());
}
public static Comparable readSortValue(StreamInput in) throws IOException {
    byte type = in.readByte();
    if (type == 0) {
        return null;
    } else if (type == 1) {
        return in.readString();
    } else if (type == 2) {
        return in.readInt();
    } else if (type == 3) {
        return in.readLong();
    } else if (type == 4) {
        return in.readFloat();
    } else if (type == 5) {
        return in.readDouble();
    } else if (type == 6) {
        return in.readByte();
    } else if (type == 7) {
        return in.readShort();
    } else if (type == 8) {
        return in.readBoolean();
    } else if (type == 9) {
        return in.readBytesRef();
    } else {
        throw new IOException("Can't match type [" + type + "]");
    }
}
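The tag-based dispatch above implies a symmetric write side. The sketch below is hypothetical, not the actual Elasticsearch helper: it only illustrates that each value must be preceded by the same one-byte type tag the reader switches on, and it spells out just a few of the tags.

import java.io.IOException;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.io.stream.StreamOutput;

final class SortValueWriteSketch {
    // Hypothetical write-side counterpart of readSortValue: one-byte tag, then the value.
    // The remaining tags (float, double, byte, short, boolean) would follow the same pattern.
    static void writeSortValue(StreamOutput out, Object value) throws IOException {
        if (value == null) {
            out.writeByte((byte) 0);
        } else if (value instanceof String) {
            out.writeByte((byte) 1);
            out.writeString((String) value);
        } else if (value instanceof Integer) {
            out.writeByte((byte) 2);
            out.writeInt((Integer) value);
        } else if (value instanceof Long) {
            out.writeByte((byte) 3);
            out.writeLong((Long) value);
        } else if (value instanceof BytesRef) {
            out.writeByte((byte) 9);
            out.writeBytesRef((BytesRef) value);
        } else {
            throw new IllegalArgumentException("unsupported sort value type: " + value.getClass());
        }
    }
}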
@Override
public BytesRef readBytesRef(int length) throws IOException {
    // NOTE: It is unsafe to share a reference of the internal structure, so we
    // use the default implementation which will copy the bytes. It is unsafe because
    // a netty ByteBuf might be pooled which requires a manual release to prevent
    // memory leaks.
    return super.readBytesRef(length);
}
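For reference, the copying default that super.readBytesRef(length) falls back to behaves roughly like the sketch below (an approximation written as a standalone helper, not the exact StreamInput source):

import java.io.IOException;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.io.stream.StreamInput;

final class CopyingReadSketch {
    // Assumed approximation of the copying base behavior: read the bytes into a
    // fresh array so the returned BytesRef is independent of the pooled buffer.
    static BytesRef readBytesRefByCopy(StreamInput in, int length) throws IOException {
        if (length == 0) {
            return new BytesRef();
        }
        byte[] bytes = new byte[length];
        in.readBytes(bytes, 0, length);         // copies out of the underlying buffer
        return new BytesRef(bytes, 0, length);  // the BytesRef owns an independent copy
    }
}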
private Delete(final StreamInput in) throws IOException {
    final int format = in.readVInt(); // SERIALIZATION_FORMAT
    assert format >= FORMAT_5_0 : "format was: " + format;
    if (format >= FORMAT_SINGLE_TYPE) {
        type = in.readString();
        id = in.readString();
        if (format >= FORMAT_SEQ_NO) {
            uid = new Term(in.readString(), in.readBytesRef());
        } else {
            uid = new Term(in.readString(), in.readString());
        }
    } else {
        uid = new Term(in.readString(), in.readString());
        // the uid was constructed from the type and id so we can
        // extract them back
        Uid uidObject = Uid.createUid(uid.text());
        type = uidObject.type();
        id = uidObject.id();
    }
    this.version = in.readLong();
    this.versionType = VersionType.fromValue(in.readByte());
    assert versionType.validateVersionForWrites(this.version);
    if (format >= FORMAT_SEQ_NO) {
        seqNo = in.readLong();
        primaryTerm = in.readLong();
    } else {
        seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
        primaryTerm = 0;
    }
}
public static FieldDoc readFieldDoc(StreamInput in) throws IOException {
    Comparable[] cFields = new Comparable[in.readVInt()];
    for (int j = 0; j < cFields.length; j++) {
        byte type = in.readByte();
        if (type == 0) {
            cFields[j] = null;
        } else if (type == 1) {
            cFields[j] = in.readString();
        } else if (type == 2) {
            cFields[j] = in.readInt();
        } else if (type == 3) {
            cFields[j] = in.readLong();
        } else if (type == 4) {
            cFields[j] = in.readFloat();
        } else if (type == 5) {
            cFields[j] = in.readDouble();
        } else if (type == 6) {
            cFields[j] = in.readByte();
        } else if (type == 7) {
            cFields[j] = in.readShort();
        } else if (type == 8) {
            cFields[j] = in.readBoolean();
        } else if (type == 9) {
            cFields[j] = in.readBytesRef();
        } else {
            throw new IOException("Can't match type [" + type + "]");
        }
    }
    return new FieldDoc(in.readVInt(), in.readFloat(), cFields);
}
/**
 * Read from a stream.
 */
public StoreFileMetaData(StreamInput in) throws IOException {
    name = in.readString();
    length = in.readVLong();
    checksum = in.readString();
    // TODO Why not Version.parse?
    writtenBy = Lucene.parseVersionLenient(in.readString(), FIRST_LUCENE_CHECKSUM_VERSION);
    hash = in.readBytesRef();
}
@Override
public BytesRef readBytesRef(int length) throws IOException {
    if (!buffer.hasArray()) {
        // Direct (off-heap) buffer: fall back to the copying default implementation.
        return super.readBytesRef(length);
    }
    // Heap-backed buffer: wrap the backing array directly instead of copying.
    BytesRef bytesRef = new BytesRef(buffer.array(), buffer.arrayOffset() + buffer.readerIndex(), length);
    buffer.skipBytes(length);
    return bytesRef;
}