@Override public SortedBinaryDocValues getBytesValues() { SortedBinaryDocValues inValues = in.getBytesValues(); return new SortedBinaryDocValues() { @Override public BytesRef nextValue() throws IOException { BytesRef encoded = inValues.nextValue(); return new BytesRef(Uid.decodeId( Arrays.copyOfRange(encoded.bytes, encoded.offset, encoded.offset + encoded.length))); } @Override public int docValueCount() { final int count = inValues.docValueCount(); // If the count is not 1 then the impl is not correct as the binary representation // does not preserve order. But id fields only have one value per doc so we are good. assert count == 1; return inValues.docValueCount(); } @Override public boolean advanceExact(int doc) throws IOException { return inValues.advanceExact(doc); } }; } };
/**
 * Returns the field values exposed to returned hits; by default this is
 * simply the script-facing view of the field.
 */
default ScriptDocValues<?> getLegacyFieldValues() {
    return getScriptValues();
}
/**
 * Releases the resources of the underlying id field data; closing this view
 * closes its delegate.
 */
@Override
public void close() {
    idFieldData.close();
}
// NOTE(review): fragment of a larger method — the if/else-if braces opened
// here are closed outside this view; code left untouched.
// Load per-segment field data, then pick a value representation by format.
data = indexFieldData.load(subReaderContext);
if (format == null) {
    // No explicit format: use the legacy script-values view.
    scriptValues = data.getLegacyFieldValues();
} else if (indexFieldData instanceof IndexNumericFieldData) {
    if (((IndexNumericFieldData) indexFieldData).getNumericType().isFloatingPoint()) {
        binaryValues = data.getBytesValues();
@Override public void setNextReader(final AtomicReaderContext context) throws IOException { final int currentCount = _entries.size(); if(currentCount > _maxPerShard) return; // Heuristic: only load the values with hashes if we're in exhaustive // mode and we aren't coming close to hitting our per-shard limit. // If either of these conditions are false, load the data without // hashes, and they'll get calculated on the fly anyway. _values = (_exhaustive && currentCount * 1.1 < _maxPerShard) ? _indexFieldData.load(context).getHashedBytesValues() : _indexFieldData.load(context).getBytesValues(); }
/**
 * Reports the memory footprint of the wrapped instance; this wrapper adds no
 * accounting of its own.
 */
@Override
public long ramBytesUsed() {
    return in.ramBytesUsed();
}
// NOTE(review): fragment of a larger method — the if/else-if braces opened
// here are closed outside this view; code left untouched.
// Load per-segment field data, then pick a value representation by format.
data = indexFieldData.load(subReaderContext);
if (format == null) {
    // No explicit format: use the legacy script-values view.
    scriptValues = data.getLegacyFieldValues();
} else if (indexFieldData instanceof IndexNumericFieldData) {
    if (((IndexNumericFieldData) indexFieldData).getNumericType().isFloatingPoint()) {
        binaryValues = data.getBytesValues();
/** Delegates memory accounting to the wrapped instance. */
@Override
public long ramBytesUsed() {
    return in.ramBytesUsed();
}
// Loads the per-segment view of this field and exposes its raw binary values.
@Override public SortedBinaryDocValues bytesValues(LeafReaderContext context) {
    return indexFieldData.load(context).getBytesValues();
}
// NOTE(review): trailing brace closes an enclosing scope not visible here.
}
// Resolves this field's data for the current reader inside a callback context
// and returns the script-facing view of its values.
@Override public ScriptDocValues<?> run() {
    return fieldDataLookup.apply(fieldType).load(reader).getScriptValues();
}
// NOTE(review): trailing tokens close an enclosing anonymous class and call
// not visible here.
});
/** Closing this wrapper closes the delegate it wraps. */
@Override
public void close() {
    in.close();
}
/** Delegates memory accounting to the wrapped instance. */
@Override
public long ramBytesUsed() {
    return in.ramBytesUsed();
}
/**
 * Loads this field's data for the given segment and returns its per-document
 * binary values.
 *
 * @param leafContext the segment to read from
 * @return the binary doc values of the field in that segment
 * @throws IOException if loading the field data fails
 */
protected SortedBinaryDocValues getValues(LeafReaderContext leafContext) throws IOException {
    return indexFieldData.load(leafContext).getBytesValues();
}
/**
 * Returns the field values exposed to returned hits; by default this is
 * simply the script-facing view of the field.
 */
default ScriptDocValues<?> getLegacyFieldValues() {
    return getScriptValues();
}
/** Closing this wrapper closes the delegate it wraps. */
@Override
public void close() {
    in.close();
}
/**
 * Exposes the underlying _id values with {@code prefix} prepended to each id,
 * reusing a single builder to avoid a per-value allocation.
 */
@Override
public SortedBinaryDocValues getBytesValues() {
    final SortedBinaryDocValues delegate = idFieldData.getBytesValues();
    return new SortedBinaryDocValues() {
        // Reused buffer; the returned BytesRef is only valid until the next
        // call to nextValue().
        private final BytesRefBuilder builder = new BytesRefBuilder();

        @Override
        public boolean advanceExact(int doc) throws IOException {
            return delegate.advanceExact(doc);
        }

        @Override
        public int docValueCount() {
            return delegate.docValueCount();
        }

        @Override
        public BytesRef nextValue() throws IOException {
            final BytesRef id = delegate.nextValue();
            builder.copyBytes(prefix);
            builder.append(id);
            return builder.get();
        }
    };
}
// Resolves this field's data for the current reader inside a callback context
// and returns the script-facing view of its values.
@Override public ScriptDocValues<?> run() {
    return fieldDataLookup.apply(fieldType).load(reader).getScriptValues();
}
// NOTE(review): trailing tokens close an enclosing anonymous class and call
// not visible here.
});
/**
 * Releases the resources of the underlying id field data; closing this view
 * closes its delegate.
 */
@Override
public void close() {
    idFieldData.close();
}
// NOTE(review): fragment of a larger method — the dangling "} else {" is
// closed outside this view; code left untouched.
if (fieldData != null) {
    // Load the segment-local view and grab its binary values.
    AtomicFieldData leafData = fieldData.load(ctx);
    values = leafData.getBytesValues();
    // Fail fast with a clear message rather than NPE-ing later on use.
    if (values == null) throw new NullPointerException("failed to get fielddata");
} else {
// Resolves this field's data for the current reader inside a callback context
// and returns the script-facing view of its values.
@Override public ScriptDocValues<?> run() {
    return fieldDataLookup.apply(fieldType).load(reader).getScriptValues();
}
// NOTE(review): trailing tokens close an enclosing anonymous class and call
// not visible here.
});