@Override
public List<PartitionQuery<K, T>> getPartitions(Query<K, T> query) throws IOException {
  // Single-partition strategy: the whole query is served as one partition.
  PartitionQueryImpl<K, T> partition = new PartitionQueryImpl<>(query);
  partition.setConf(getConf());
  List<PartitionQuery<K, T>> partitions = new ArrayList<>();
  partitions.add(partition);
  return partitions;
}
@SuppressWarnings("rawtypes")
@Override
public boolean equals(Object obj) {
  // Equal only to another FileSplitPartitionQuery whose base query and split both match.
  if (!(obj instanceof FileSplitPartitionQuery)) {
    return false;
  }
  FileSplitPartitionQuery other = (FileSplitPartitionQuery) obj;
  return super.equals(obj) && this.split.equals(other.split);
}
@Override
public List<PartitionQuery<String, MockPersistent>> getPartitions(
    Query<String, MockPersistent> query) throws IOException {
  // One partition per configured location, in declaration order.
  List<PartitionQuery<String, MockPersistent>> partitions = new ArrayList<>(NUM_PARTITIONS);
  for (int index = 0; index < NUM_PARTITIONS; index++) {
    partitions.add(new PartitionQueryImpl<>(query, LOCATIONS[index]));
  }
  return partitions;
}
ModifiableSolrParams params = new ModifiableSolrParams(); if (query instanceof PartitionQueryImpl) { query = ((PartitionQueryImpl<K, T>)query).getBaseQuery();
@Test
public void testReadWrite() throws Exception {
  // Round-trip a PartitionQueryImpl through Hadoop Writable serialization.
  MockQuery base = dataStore.newQuery();
  base.setStartKey("start");
  base.setLimit(42);
  PartitionQueryImpl<String, MockPersistent> partitionQuery = new PartitionQueryImpl<>(base);
  TestWritable.testWritable(partitionQuery);
}
/**
 * {@inheritDoc}
 */
@Override
public Result<K, T> execute(Query<K, T> query) throws GoraException {
  // Resolve the concrete set of fields to fetch for this query.
  String[] fields = getFieldsToQuery(query.getFields());
  // Unwrap a partition query to the underlying native OrientDB query.
  OrientDBQuery dataStoreQuery;
  if (query instanceof OrientDBQuery) {
    dataStoreQuery = ((OrientDBQuery) query);
  } else {
    dataStoreQuery = (OrientDBQuery) ((PartitionQueryImpl<K, T>) query).getBaseQuery();
  }
  // Build the native OSQL statement from the mapping and requested fields.
  dataStoreQuery.populateOrientDBQuery(orientDBMapping, fields, getFields());
  // Borrow a connection from the pool; try-with-resources returns it on exit.
  try (ODatabaseDocumentTx selectTx = connectionPool.acquire()) {
    // OrientDB binds a database instance to the current thread before use.
    selectTx.activateOnCurrentThread();
    OConcurrentResultSet<ODocument> result = selectTx.command(dataStoreQuery.getOrientDBQuery())
        .execute(dataStoreQuery.getParams());
    // NOTE(review): getLimit() is a long narrowed to int here; presumably a
    // negative value means "no limit" — confirm OConcurrentResultSet treats
    // negative limits as unbounded.
    result.setLimit((int) query.getLimit());
    return new OrientDBResult<K, T>(this, query, result);
  } catch (Exception e) {
    // Wrap any driver-level failure in the store's checked exception type,
    // preserving the cause.
    throw new GoraException(e);
  }
}
/**
 * Returns a single partition containing the original query.
 *
 * <p>The Javadoc must precede the annotation: placed between {@code @Override}
 * and the signature it is not recognized by the javadoc tool.
 *
 * @param query the query to partition
 * @return a one-element list wrapping {@code query} as a partition query
 */
@Override
public List<PartitionQuery<K, T>> getPartitions(Query<K, T> query){
  List<PartitionQuery<K, T>> list = new ArrayList<>();
  PartitionQueryImpl<K, T> pqi = new PartitionQueryImpl<>(query);
  pqi.setConf(getConf());
  list.add(pqi);
  return list;
}
@SuppressWarnings("rawtypes")
@Override
public boolean equals(Object obj) {
  // Two FileSplitPartitionQuery instances are equal when the superclass state
  // and the wrapped split both compare equal.
  if (obj instanceof FileSplitPartitionQuery) {
    FileSplitPartitionQuery that = (FileSplitPartitionQuery) obj;
    return super.equals(obj) && this.split.equals(that.split);
  }
  return false;
}
/**
 * Returns a single partition containing the original query.
 *
 * <p>The Javadoc must precede the annotation: placed between {@code @Override}
 * and the signature it is not recognized by the javadoc tool.
 *
 * @param query the query to partition
 * @return a one-element list wrapping {@code query} as a partition query
 */
@Override
public List<PartitionQuery<K, T>> getPartitions(Query<K, T> query){
  List<PartitionQuery<K, T>> list = new ArrayList<>();
  PartitionQueryImpl<K, T> pqi = new PartitionQueryImpl<>(query);
  pqi.setConf(getConf());
  list.add(pqi);
  return list;
}
/** * {@inheritDoc} */ @Override public List<PartitionQuery<K, T>> getPartitions(Query<K, T> query) throws IOException { // TODO : Improve code on OrientDB clusters List<PartitionQuery<K, T>> partitions = new ArrayList<>(); PartitionQueryImpl<K, T> partitionQuery = new PartitionQueryImpl<>( query); partitionQuery.setConf(this.getConf()); partitions.add(partitionQuery); return partitions; }
@Override public List<PartitionQuery<K, T>> getPartitions(Query<K, T> query) throws IOException { // TODO: implement this using Hadoop DB support ArrayList<PartitionQuery<K, T>> partitions = new ArrayList<>(); PartitionQueryImpl<K, T> pqi = new PartitionQueryImpl<>(query); pqi.setConf(getConf()); partitions.add(pqi); return partitions; }
@Override
public List<PartitionQuery<K, T>> getPartitions(Query<K, T> query) throws IOException {
  // No native partitioning: wrap the query as one configured partition.
  List<PartitionQuery<K, T>> result = new ArrayList<>();
  PartitionQueryImpl<K, T> wrapped = new PartitionQueryImpl<>(query);
  wrapped.setConf(getConf());
  result.add(wrapped);
  return result;
}
@Override
public List<PartitionQuery<K, T>> getPartitions(Query<K, T> query) throws IOException {
  // Build the single partition first, then return it in a one-element list.
  PartitionQueryImpl<K, T> wrapped = new PartitionQueryImpl<>(query);
  wrapped.setConf(getConf());
  List<PartitionQuery<K, T>> out = new ArrayList<>();
  out.add(wrapped);
  return out;
}
@Override public List<PartitionQuery<K, T>> getPartitions(Query<K, T> query) throws IOException { // TODO GORA-298 Implement CassandraStore#getPartitions List<PartitionQuery<K,T>> partitions = new ArrayList<PartitionQuery<K,T>>(); PartitionQueryImpl<K, T> pqi = new PartitionQueryImpl<K, T>(query); pqi.setConf(getConf()); partitions.add(pqi); return partitions; }
/**
 * {@inheritDoc}
 * As Aerospike does not currently support query key ranges, only a single
 * partition is returned by this method.
 *
 * @param query the query to execute
 * @return a single-element list of partitions
 * @throws IOException declared by the interface; not thrown here
 */
@Override
public List<PartitionQuery<K, T>> getPartitions(Query<K, T> query) throws IOException {
  List<PartitionQuery<K, T>> partitions = new ArrayList<>();
  PartitionQueryImpl<K, T> partitionQuery = new PartitionQueryImpl<>(query);
  partitionQuery.setConf(getConf());
  partitions.add(partitionQuery);
  return partitions;
}
/** * Partitions the given query and returns a list of PartitionQuerys, which * will execute on local data. */ @Override public List<PartitionQuery<K, T>> getPartitions(final Query<K, T> query) throws IOException { // FIXME: for now, there is only one partition as we do not handle // MongoDB sharding configuration List<PartitionQuery<K, T>> partitions = new ArrayList<>(); PartitionQueryImpl<K, T> partitionQuery = new PartitionQueryImpl<>( query); partitionQuery.setConf(getConf()); partitions.add(partitionQuery); return partitions; }
/**
 * {@inheritDoc}
 * As Aerospike does not currently support query key ranges, only a single
 * partition is returned by this method.
 *
 * @param query the query to execute
 * @return a single-element list of partitions
 * @throws IOException declared by the interface; not thrown here
 */
@Override
public List<PartitionQuery<K, T>> getPartitions(Query<K, T> query) throws IOException {
  List<PartitionQuery<K, T>> partitions = new ArrayList<>();
  PartitionQueryImpl<K, T> partitionQuery = new PartitionQueryImpl<>(query);
  partitionQuery.setConf(getConf());
  partitions.add(partitionQuery);
  return partitions;
}
PartitionQueryImpl<K, T> partition = new PartitionQueryImpl<>( query, memberOwnedCacheEntries.first(), memberOwnedCacheEntries.last(), member.getSocketAddress().getHostString()); partition.setConf(this.getConf()); partitions.add(partition);
/**
 * Partitions the query across the members of the Hazelcast cluster: each
 * member yields one key-range partition covering the cache keys it owns.
 *
 * @param query the query to partition
 * @return one partition per member that owns at least one matching key, or
 *         {@code null} if partitioning fails (existing error contract kept)
 * @throws IOException declared by the interface; not thrown directly here
 */
@Override
public List<PartitionQuery<K, T>> getPartitions(Query<K, T> query) throws IOException {
  List<PartitionQuery<K, T>> partitions = new ArrayList<>();
  try {
    Member[] clusterMembers = new Member[hazelcastInstance.getCluster().getMembers().size()];
    this.hazelcastInstance.getCluster().getMembers().toArray(clusterMembers);
    for (Member member : clusterMembers) {
      // Re-execute the query per member and collect the keys this member owns.
      JCacheResult<K, T> result = ((JCacheResult<K, T>) query.execute());
      ConcurrentSkipListSet<K> memberOwnedCacheEntries = new ConcurrentSkipListSet<>();
      while (result.next()) {
        K key = result.getKey();
        Partition hzPartition = hazelcastInstance.getPartitionService().getPartition(key);
        if (hzPartition.getOwner().getUuid().equals(member.getUuid())) {
          memberOwnedCacheEntries.add(key);
        }
      }
      // Bug fix: a member owning no matching keys would make first()/last()
      // throw NoSuchElementException, aborting the whole call via the catch
      // below. Skip such members instead.
      if (memberOwnedCacheEntries.isEmpty()) {
        continue;
      }
      PartitionQueryImpl<K, T> partition = new PartitionQueryImpl<>(
          query, memberOwnedCacheEntries.first(), memberOwnedCacheEntries.last(),
          member.getSocketAddress().getHostString());
      partition.setConf(this.getConf());
      partitions.add(partition);
    }
  } catch (Exception ex) {
    // NOTE(review): returning null on failure is the pre-existing contract,
    // preserved for backward compatibility; callers must null-check.
    LOG.error("Exception occurred while partitioning the query based on Hazelcast partitions.", ex);
    return null;
  }
  LOG.info("Query is partitioned to {} number of partitions.", partitions.size());
  return partitions;
}
null : HBaseByteInterface.fromBytes(keyClass, splitStop); PartitionQueryImpl<K, T> partition = new PartitionQueryImpl<>( query, startKey, endKey, regionLocation); partition.setConf(getConf());