@Override
public int hashCode() {
    return new HashCodeBuilder().append(sliceStart).append(sliceEnd).append(getLimit()).toHashCode();
}
@Override
public Iterator<Entry> getNewIterator(int newLimit) {
    // Only ever widen the limit; a new iterator never needs fewer results than before.
    if (newLimit > sliceQuery.getLimit())
        sliceQuery = sliceQuery.updateLimit(newLimit);
    return getBasicIterator();
}
}
private LimitAdjustingIterator() {
    // Seed with the limit requested by the user query and the current limit of the backend slice query.
    super(query.getLimit(), sliceQuery.getLimit());
}
public boolean subsumes(SliceQuery oth) {
    Preconditions.checkNotNull(oth);
    if (this == oth) return true;

    if (oth.getLimit() > getLimit()) return false;
    else if (!hasLimit()) // without a limit, the interval must be contained
        return sliceStart.compareTo(oth.sliceStart) <= 0 && sliceEnd.compareTo(oth.sliceEnd) >= 0;
    else // the result might be cut off by the limit, so the start must be identical
        return sliceStart.compareTo(oth.sliceStart) == 0 && sliceEnd.compareTo(oth.sliceEnd) >= 0;
}
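For context, a minimal sketch of the subsumption contract. The BufferUtil.getIntBuffer helper for building column bounds is an assumption based on the surrounding API, not taken from this section:

// Minimal sketch, assuming BufferUtil.getIntBuffer from
// org.janusgraph.diskstorage.util is available to build column bounds.
SliceQuery outer = new SliceQuery(BufferUtil.getIntBuffer(0), BufferUtil.getIntBuffer(100));
SliceQuery inner = new SliceQuery(BufferUtil.getIntBuffer(10), BufferUtil.getIntBuffer(50));
boolean covered = outer.subsumes(inner); // true: no limit set, and the interval is contained

outer.setLimit(10);
// inner carries no explicit limit, so its getLimit() (Query.NO_LIMIT) exceeds 10,
// and the start buffers differ, so subsumption no longer holds.
boolean stillCovered = outer.subsumes(inner); // false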
public EntryList getSubset(final SliceQuery otherQuery, final EntryList otherResult) {
    assert otherQuery.subsumes(this);
    // Locate the first entry at or after this query's slice start.
    int pos = Collections.binarySearch(otherResult, sliceStart);
    if (pos < 0) pos = -pos - 1;

    final List<Entry> result = new ArrayList<>();
    for (; pos < otherResult.size() && result.size() < getLimit(); pos++) {
        final Entry e = otherResult.get(pos);
        // The slice end is exclusive; stop at the first column at or past it.
        if (e.getColumnAs(StaticBuffer.STATIC_FACTORY).compareTo(sliceEnd) < 0) {
            result.add(e);
        } else {
            break;
        }
    }
    return StaticArrayEntryList.of(result);
}
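A hedged usage sketch of the cache-reuse pattern this enables: when a previously executed query subsumes a new one, the cached result can be narrowed locally instead of re-querying the backend. The names cachedQuery, cachedResult, and newQuery are hypothetical, not part of the surrounding code:

// Hypothetical cache-reuse pattern; cachedQuery/cachedResult/newQuery are
// illustrative names only.
if (cachedQuery.subsumes(newQuery)) {
    EntryList narrowed = newQuery.getSubset(cachedQuery, cachedResult);
    return narrowed; // no backend round trip needed
}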
private static String encodeQueries(List<SliceQuery> queries) {
    final List<String> queryStrings = new ArrayList<>(queries.size());
    for (SliceQuery query : queries) {
        final String start = Hex.bytesToHex(query.getSliceStart().as(StaticBuffer.ARRAY_FACTORY));
        final String end = Hex.bytesToHex(query.getSliceEnd().as(StaticBuffer.ARRAY_FACTORY));
        // Encode "no limit" as -1 so it is distinguishable from any real limit.
        final int limit = query.hasLimit() ? query.getLimit() : -1;
        queryStrings.add(String.format("%s/%s/%d", start, end, limit));
    }
    return Joiner.on(":").join(queryStrings);
}
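For illustration, encoding two queries yields a string of the following shape, where -1 marks an unlimited query; the hex bounds shown are invented values, not real output:

00/ff/-1:0a10/0b20/100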
public SliceQuery(final SliceQuery query) {
    this(query.getSliceStart(), query.getSliceEnd());
    setLimit(query.getLimit());
}
@Override
public Iterator<Entry> iterator() {
    // If the backend slice limit differs from the query limit, wrap the basic iterator
    // in a LimitAdjustingIterator, which ensures the right number of elements is
    // returned. Otherwise the basic iterator can be returned directly.
    if (sliceQuery.hasLimit() && sliceQuery.getLimit() != query.getLimit()) {
        return new LimitAdjustingIterator();
    }
    return getBasicIterator();
}
@Override
public void process(StaticBuffer key, Map<SliceQuery, EntryList> entries, ScanMetrics metrics) {
    final long vertexId = getVertexId(key);
    assert entries.get(VERTEX_EXISTS_QUERY) != null;
    // Skip ghost vertices and count them in the scan metrics.
    if (isGhostVertex(vertexId, entries.get(VERTEX_EXISTS_QUERY))) {
        metrics.incrementCustom(GHOST_VERTEX_COUNT);
        return;
    }

    final JanusGraphVertex vertex = tx.getInternalVertex(vertexId);
    Preconditions.checkArgument(vertex instanceof PreloadedVertex,
            "The bounding transaction is not configured correctly");
    final PreloadedVertex v = (PreloadedVertex) vertex;
    v.setAccessCheck(PreloadedVertex.OPENSTAR_CHECK);

    for (Map.Entry<SliceQuery, EntryList> entry : entries.entrySet()) {
        final SliceQuery sq = entry.getKey();
        if (sq.equals(VERTEX_EXISTS_QUERY)) continue;
        final EntryList entryList = entry.getValue();
        // A result list that fills the limit may have been truncated; record it.
        if (entryList.size() >= sq.getLimit())
            metrics.incrementCustom(TRUNCATED_ENTRY_LISTS);
        v.addToQueryCache(sq.updateLimit(Query.NO_LIMIT), entryList);
    }
    job.process(v, metrics);
}
@Override
public Map<StaticBuffer, EntryList> getSlice(final List<StaticBuffer> keys, final SliceQuery query,
                                             final StoreTransaction txh) throws BackendException {
    log.debug("Entering getSliceMultiSliceQuery table:{} keys:{} query:{} txh:{}",
            getTableName(), encodeForLog(keys), encodeForLog(query), txh);
    // Convert the keys into GetItem workers, fetch the items in parallel, and
    // extract the entries within the slice bounds from each result.
    final Map<StaticBuffer, EntryList> entries =
        client.getDelegate().parallelGetItem(keys.stream().map(this::createGetItemWorker).collect(Collectors.toList()))
            .entrySet()
            .stream()
            .collect(Collectors.toMap(Map.Entry::getKey,
                entry -> extractEntriesFromGetItemResult(entry.getValue(),
                        query.getSliceStart(), query.getSliceEnd(), query.getLimit())));
    log.debug("Exiting getSliceMultiSliceQuery table:{} keys:{} query:{} txh:{} returning:{}",
            getTableName(), encodeForLog(keys), encodeForLog(query), txh, entries.size());
    return entries;
}
private List<Entry> decodeSlice(final Map<String, AttributeValue> item) {
    final List<Entry> entries = new EntryBuilder(item).buildAll();
    // Sentinel entries used purely for range comparison against the slice bounds.
    final Entry sliceStartEntry = StaticArrayEntry.of(sliceQuery.getSliceStart(), BufferUtil.emptyBuffer());
    final Entry sliceEndEntry = StaticArrayEntry.of(sliceQuery.getSliceEnd(), BufferUtil.emptyBuffer());
    final List<Entry> filteredEntries = new ArrayList<>(entries.size());
    for (Entry entry : entries) {
        // The start bound is inclusive, the end bound exclusive.
        if (entry.compareTo(sliceStartEntry) >= 0 && entry.compareTo(sliceEndEntry) < 0) {
            filteredEntries.add(entry);
        }
    }
    return filteredEntries.subList(0, Math.min(filteredEntries.size(), sliceQuery.getLimit()));
}
protected String encodeForLog(final SliceQuery query) {
    return "slice[rk:" + encodeKeyForLog(query.getSliceStart()) + " -> "
            + encodeKeyForLog(query.getSliceEnd()) + " limit:" + query.getLimit() + "]";
}
EntryRecordIterator(final SliceQuery sliceQuery, final CQLColValGetter getter,
                    final Iterator<Row> iterator, final StaticBuffer key) {
    this.getter = getter;
    final StaticBuffer sliceEnd = sliceQuery.getSliceEnd();
    this.iterator = iterator
            .<Tuple3<StaticBuffer, StaticBuffer, Row>>map(row -> Tuple.of(
                    StaticArrayBuffer.of(row.getBytes(CQLKeyColumnValueStore.COLUMN_COLUMN_NAME)),
                    StaticArrayBuffer.of(row.getBytes(CQLKeyColumnValueStore.VALUE_COLUMN_NAME)),
                    row))
            // Stop once a row belongs to a different key or the exclusive slice end is reached.
            .takeWhile(tuple -> key.equals(StaticArrayBuffer.of(tuple._3.getBytes(CQLKeyColumnValueStore.KEY_COLUMN_NAME)))
                    && !sliceEnd.equals(tuple._1))
            .take(sliceQuery.getLimit());
}
private SliceRange getSliceRange(final int limit) {
    final SliceRange sliceRange = new SliceRange();
    sliceRange.setStart(JanusGraphHadoopSetupCommon.DEFAULT_SLICE_QUERY.getSliceStart().asByteBuffer());
    sliceRange.setFinish(JanusGraphHadoopSetupCommon.DEFAULT_SLICE_QUERY.getSliceEnd().asByteBuffer());
    // Never request more columns than the default slice query allows.
    sliceRange.setCount(Math.min(limit, JanusGraphHadoopSetupCommon.DEFAULT_SLICE_QUERY.getLimit()));
    return sliceRange;
}
}
public QueryWorker buildQueryWorker(final StaticBuffer hashKey, final SliceQuery query) {
    final QueryRequest request = createQueryRequest(hashKey, query);
    // Only enforce a limit when Titan tells us to
    if (query.hasLimit()) {
        final int limit = query.getLimit();
        request.setLimit(limit);
        return new QueryWithLimitWorker(client.getDelegate(), request, hashKey, limit);
    }
    return new QueryWorker(client.getDelegate(), request, hashKey);
}
public static Filter getFilter(SliceQuery query) {
    // Empty bounds mean an unbounded range; ColumnRangeFilter expects null in that case.
    final byte[] colStartBytes = query.getSliceStart().length() > 0
            ? query.getSliceStart().as(StaticBuffer.ARRAY_FACTORY) : null;
    final byte[] colEndBytes = query.getSliceEnd().length() > 0
            ? query.getSliceEnd().as(StaticBuffer.ARRAY_FACTORY) : null;
    // Start inclusive, end exclusive, matching SliceQuery semantics.
    Filter filter = new ColumnRangeFilter(colStartBytes, true, colEndBytes, false);
    if (query.hasLimit()) {
        filter = new FilterList(FilterList.Operator.MUST_PASS_ALL,
                filter,
                new ColumnPaginationFilter(query.getLimit(), 0));
    }
    logger.debug("Generated HBase Filter {}", filter);
    return filter;
}
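A minimal usage sketch, assuming the standard org.apache.hadoop.hbase.client.Scan API:

// Attach the generated filter to a scan; the ColumnPaginationFilter component
// enforces the limit server-side, so at most getLimit() columns per row return.
Scan scan = new Scan();
scan.setFilter(getFilter(query));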
// Cap the end index so that at most getLimit() entries beyond the first matching position are returned.
endIndex = Math.min(endIndex, query.getLimit() + lowestStartMatch);