// Queries the in-memory store: rejects paging outright, then narrows the result
// set first by ids, then by conditions, and finally applies offset/limit
// windowing over the filtered results.
// NOTE(review): this line is a collapsed/truncated extract — closing braces and
// the declarations of `rs` and `iterator` are missing; restore from the full
// source before editing. Do not treat the `throw` as guarding the code after it.
@Override public Iterator<BackendEntry> query(BackendSession session, Query query) { if (query.paging()) { throw new NotSupportException("paging by InMemoryDBStore"); if (!query.ids().isEmpty()) { rs = this.queryById(query.ids(), rs); if (!query.conditions().isEmpty()) { rs = this.queryByFilter(query.conditions(), rs); if (query.offset() >= rs.size()) { return Collections.emptyIterator(); iterator = this.skipOffset(iterator, query.offset()); if (query.limit() != Query.NO_LIMIT && query.offset() + query.limit() < rs.size()) { iterator = this.dropTails(iterator, query.limit());
// Serializes a query for the backend: forbids mixed id+condition edge queries,
// rewrites each id via writeQueryId() for IdQuery, and delegates condition
// rewriting to writeQueryCondition() for ConditionQuery.
// NOTE(review): collapsed/truncated extract — closing braces and the final
// `return` statement are missing; restore from the full source before editing.
@Override public Query writeQuery(Query query) { HugeType type = query.resultType(); if (type.isEdge() && !query.conditions().isEmpty()) { if (!query.ids().isEmpty()) { throw new BackendException("Not supported query edge by id " + "and by condition at the same time"); if (query instanceof IdQuery && !query.ids().isEmpty()) { IdQuery result = (IdQuery) query.copy(); result.resetIds(); for (Id id : query.ids()) { result.query(this.writeQueryId(type, id)); if (query instanceof ConditionQuery && !query.conditions().isEmpty()) { query = this.writeQueryCondition(query);
/**
 * Tells whether this query carries no filtering criteria at all.
 *
 * @return {@code true} if neither ids nor conditions are present
 */
public boolean empty() {
    boolean noIds = this.ids().isEmpty();
    boolean noConditions = this.conditions().isEmpty();
    return noIds && noConditions;
}
// Builds the Cassandra SELECT: offset is emulated client-side (the statement
// fetches [0, offset + limit) and the offset rows are skipped later), a LIMIT
// clause is added when the query is bounded, and ORDER BY clauses are appended
// from query.orders().
// NOTE(review): collapsed/truncated extract — closing braces and the statements
// following the ASC check and the empty-conditions check are missing; restore
// from the full source before editing.
if (query.offset() != 0) { LOG.debug("Query offset is not supported on Cassandra store " + "currently, it will be replaced by [0, offset + limit)"); if (query.limit() != Query.NO_LIMIT) { long total = query.total(); String page = query.page(); if (page == null) { select.limit((int) total); for (Map.Entry<HugeKeys, Order> order : query.orders().entrySet()) { String name = formatKey(order.getKey()); if (order.getValue() == Order.ASC) { if (query.conditions().isEmpty()) {
// Queries the store: short-circuits limit==0 to an empty iterator, scans all
// entries for an empty query, and otherwise resolves by id (single-id fast path
// vs. batched multi-id lookup) when no conditions are present.
// NOTE(review): collapsed/truncated extract — closing braces and the tail of
// the method are missing; restore from the full source before editing.
// NOTE(review): `query.limit() == 0 && query.limit() != Query.NO_LIMIT` looks
// redundant unless NO_LIMIT can equal 0 — confirm NO_LIMIT's value.
@Override public Iterator<BackendEntry> query(Session session, Query query) { if (query.limit() == 0 && query.limit() != Query.NO_LIMIT) { LOG.debug("Return empty result(limit=0) for query {}", query); return ImmutableList.<BackendEntry>of().iterator(); if (query.empty()) { return newEntryIterator(this.queryAll(session, query), query); if (query.conditions().isEmpty()) { assert !query.ids().isEmpty(); RowIterator rowIterator = null; if (query.ids().size() == 1) { Id id = query.ids().iterator().next(); rowIterator = this.queryById(session, id); } else { rowIterator = this.queryByIds(session, query.ids());
// Decorates a condition-free selection: applies ORDER BY when orders exist,
// then either page-based pagination (wrapPage) or offset/limit windowing
// (wrapOffset), mutually exclusively.
// NOTE(review): collapsed/truncated extract — closing braces are missing;
// restore from the full source before editing.
if (query.conditions().isEmpty()) { if (!query.orders().isEmpty()) { this.wrapOrderBy(selection, query); if (query.paging()) { this.wrapPage(selection, query); } else if (query.limit() != Query.NO_LIMIT || query.offset() > 0) { this.wrapOffset(selection, query);
// Translates a two-condition secondary-index query into an IdQuery by splicing
// the index-label id with the field value, preserving the caller's
// offset/limit, then delegates to the parent table query.
// NOTE(review): collapsed/truncated extract — the E.checkState message
// argument and the extraction of `indexLabelId`/`fieldValue` from the
// conditions are missing; restore from the full source before editing.
@Override public Iterator<BackendEntry> query(BackendSession session, Query query) { Set<Condition> conditions = query.conditions(); E.checkState(query instanceof ConditionQuery && conditions.size() == 2, Id id = SplicingIdGenerator.splicing(indexLabelId, fieldValue); IdQuery q = new IdQuery(query, id); q.offset(query.offset()); q.limit(query.limit()); return super.query(session, q);
protected void wrapPage(StringBuilder select, Query query) { String page = query.page(); // It's the first time if page is empty if (!page.isEmpty()) { PageState pageState = PageState.fromString(page); Map<HugeKeys, Object> columns = pageState.columns(); List<HugeKeys> idColumnNames = this.idColumnName(); List<Object> values = new ArrayList<>(idColumnNames.size()); for (HugeKeys key : idColumnNames) { values.add(columns.get(key)); } // Need add `where` to `select` when query is IdQuery boolean startWithWhere = query.conditions().isEmpty(); WhereBuilder where = new WhereBuilder(startWithWhere); where.gte(formatKeys(idColumnNames), values); select.append(where.build()); } assert query.limit() != Query.NO_LIMIT; // Fetch `limit + 1` records for judging whether reached the last page select.append(" limit "); select.append(query.limit() + 1); select.append(";"); }
/**
 * Wraps a raw backend iterator, merging low-level elements into
 * {@link BackendEntry} objects via {@code m} and honoring the query's
 * offset and page position.
 *
 * @param results the underlying backend results, must not be null
 * @param query   the originating query (offset/paging are applied here)
 * @param m       merges an element into the current entry
 */
public BinaryEntryIterator(BackendIterator<Elem> results, Query query,
                           BiFunction<BackendEntry, Elem, BackendEntry> m) {
    super(query);
    // Validate before touching any state
    E.checkNotNull(results, "results");
    E.checkNotNull(m, "merger");
    this.next = null;
    this.merger = m;
    this.results = results;
    // Consume the query offset up front, then resume inside the page if paging
    this.skipOffset();
    if (query.paging()) {
        this.skipPageOffset(query.page());
    }
}
/**
 * Creates a range query over ids in [start, end], inheriting the result type
 * from {@code originQuery} and delegating to the full constructor.
 *
 * @param originQuery    the query this range query derives from
 * @param start          lower bound id of the range
 * @param inclusiveStart whether {@code start} itself matches
 * @param end            upper bound id of the range
 * @param inclusiveEnd   whether {@code end} itself matches
 */
public IdRangeQuery(Query originQuery, Id start, boolean inclusiveStart, Id end, boolean inclusiveEnd) { this(originQuery.resultType(), originQuery, start, inclusiveStart, end, inclusiveEnd); }
/** * Mapping query-type to table-type * @param query origin query * @return corresponding table type */ public static HugeType tableType(Query query) { HugeType type = query.resultType(); // Mapping EDGE to EDGE_OUT/EDGE_IN if (type == HugeType.EDGE) { // We assume query OUT edges type = HugeType.EDGE_OUT; while (!(query instanceof ConditionQuery || query.originQuery() == null)) { /* * Some backends(like RocksDB) may trans ConditionQuery to * IdQuery or IdPrefixQuery, so we should get the origin query. */ query = query.originQuery(); } if (!query.conditions().isEmpty() && query instanceof ConditionQuery) { ConditionQuery cq = (ConditionQuery) query; // Does query IN edges if (cq.condition(HugeKeys.DIRECTION) == Directions.IN) { type = HugeType.EDGE_IN; } } } return type; }
// Flattens a ConditionQuery and collects the resulting pure id-queries into a
// single IdQuery; non-empty non-id sub-queries take a different path.
// NOTE(review): collapsed/truncated extract — the loop variable is `cq` but the
// body reads an undefined `q` (presumably a per-iteration transformation of
// `cq` was dropped), and closing braces are missing; restore from the full
// source before editing.
IdQuery ids = new IdQuery(query.resultType(), query); for (ConditionQuery cq: ConditionQueryFlatten.flatten( (ConditionQuery) query)) { if (q.getClass() == IdQuery.class && !q.ids().isEmpty()) { ids.query(q.ids()); } else if (!q.empty()) {
/**
 * Iterates Cassandra rows for {@code query}, merging each row into a
 * {@link BackendEntry} via {@code merger}; applies the query offset and,
 * when paging, sanity-checks the fetched page size.
 *
 * @param results the driver result set backing this iterator
 * @param query   the originating query
 * @param merger  merges a row into the current entry
 */
public CassandraEntryIterator(ResultSet results, Query query,
                              BiFunction<BackendEntry, Row, BackendEntry> merger) {
    super(query);
    this.next = null;
    this.merger = merger;
    this.results = results;
    this.rows = results.iterator();
    // Rows already buffered locally by the driver, without another fetch
    this.remaining = results.getAvailableWithoutFetching();
    this.skipOffset();
    if (query.paging()) {
        // A page must either be fully fetched or hold exactly `limit` rows
        E.checkState(this.remaining == query.limit() ||
                     results.isFullyFetched(),
                     "Unexpected fetched page size: %s", this.remaining);
    }
}
protected Iterator<HugeEdge> queryEdgesFromBackend(Query query) { assert query.resultType().isEdge(); Iterator<BackendEntry> entries = this.query(query); return new FlatMapperIterator<>(entries, entry -> { // Edges are in a vertex HugeVertex vertex = this.serializer.readVertex(graph(), entry); assert vertex != null; if (query.ids().size() == 1) { assert vertex.getEdges().size() == 1; } // Copy to avoid ConcurrentModificationException when removing edge return ImmutableList.copyOf(vertex.getEdges()).iterator(); }); }
protected List<StringBuilder> queryCondition2Select(Query query, StringBuilder select) { // Query by conditions Set<Condition> conditions = query.conditions(); List<StringBuilder> clauses = new ArrayList<>(conditions.size()); for (Condition condition : conditions) { clauses.add(this.condition2Sql(condition)); } WhereBuilder where = new WhereBuilder(); where.and(clauses); select.append(where.build()); return ImmutableList.of(select); }
/**
 * Verifies that a raw vertex Query with limit(1) returns exactly one of the
 * ten initialized vertices via the graph transaction.
 */
@Test
public void testQueryAllWithLimitByQueryVertices() {
    HugeGraph graph = graph();
    init10Vertices();

    // Unfiltered vertex query, capped at a single result
    Query limitedQuery = new Query(HugeType.VERTEX);
    limitedQuery.limit(1);

    Iterator<Vertex> results =
            graph.graphTransaction().queryVertices(limitedQuery);
    List<Vertex> fetched = IteratorUtils.list(results);
    Assert.assertEquals(1, fetched.size());
}
/**
 * Fetches the edges of {@code source} in direction {@code dir}, optionally
 * restricted to one edge label and capped at {@code limit} results.
 *
 * @param source the vertex whose edges are queried
 * @param dir    edge direction to follow
 * @param label  edge label filter, or null for all labels
 * @param limit  maximum edges to return, or NO_LIMIT for unbounded
 */
private Iterator<Edge> edgesOfVertex(Id source, Directions dir,
                                     Id label, long limit) {
    // Null label means no label restriction (empty label array)
    Id[] labels = label == null ? new Id[]{} : new Id[]{label};
    Query edgesQuery = GraphTransaction.constructEdgesQuery(source, dir, labels);
    if (limit != NO_LIMIT) {
        edgesQuery.limit(limit);
    }
    return this.graph.edges(edgesQuery);
}
/**
 * Queries all vertices: delegates to {@link #queryVertices(Query)} with an
 * unrestricted vertex query (no ids, conditions, or paging).
 *
 * @return an iterator over every vertex
 */
public Iterator<Vertex> queryVertices() {
    return this.queryVertices(new Query(HugeType.VERTEX));
}