/**
 * Select records matching an optional Aerospike Filter and any number of
 * Qualifiers.
 *
 * @param namespace  Namespace storing the data
 * @param set        Set storing the data
 * @param filter     Aerospike Filter to apply, or null for none
 * @param qualifiers Zero or more Qualifiers restricting the results
 * @return A KeyRecordIterator to iterate over the results
 */
public KeyRecordIterator select(String namespace, String set, Filter filter, Qualifier... qualifiers) {
    Statement statement = new Statement();
    statement.setNamespace(namespace);
    statement.setSetName(set);
    if (filter != null) {
        statement.setFilter(filter);
    }
    return select(statement, qualifiers);
}
/**
 * Runs the query as a server-side Lua aggregation, compiling the qualifiers
 * into a filter-function string passed to the UDF.
 *
 * @param stmt       query statement carrying namespace/set (and any index filter)
 * @param metaOnly   when TRUE, invoke the "query_meta" UDF; otherwise
 *                   "select_records" — null is treated as FALSE
 * @param node       specific node to query, or null to query the whole cluster
 * @param qualifiers predicates compiled into the Lua filter function
 * @return iterator over the aggregation results
 */
private KeyRecordIterator queryByLua(Statement stmt, Boolean metaOnly, Node node, Qualifier[] qualifiers) {
    Map<String, Object> originArgs = new HashMap<String, Object>();
    originArgs.put("includeAllFields", 1);
    String filterFuncStr = buildFilterFunction(qualifiers);
    originArgs.put("filterFuncStr", filterFuncStr);
    // Bug fix: the original `if (metaOnly)` auto-unboxed the Boolean and
    // would throw NullPointerException for a null argument; TRUE.equals()
    // treats null as false instead.
    String udfName = Boolean.TRUE.equals(metaOnly) ? "query_meta" : "select_records";
    stmt.setAggregateFunction(this.getClass().getClassLoader(), AS_UTILITY_PATH, QUERY_MODULE, udfName, Value.get(originArgs));
    ResultSet resultSet;
    if (node != null) {
        resultSet = this.client.queryAggregateNode(queryPolicy, stmt, node);
    } else {
        resultSet = this.client.queryAggregate(queryPolicy, stmt);
    }
    return new KeyRecordIterator(stmt.getNamespace(), resultSet);
}
/**
 * Builds the query statement from the collected metadata and executes it
 * immediately, wrapping the Aerospike RecordSet in a typed ResultSet.
 *
 * @return a ResultSet streaming the mapped query results
 */
public ResultSet<T> now() {
    collectMetaData();
    Statement query = new Statement();
    query.setIndexName(indexName);
    query.setNamespace(namespace);
    query.setSetName(setName);
    query.setFilters(filters);
    RecordSet records = asynClient.query(policy, query);
    return new ResultSet<>(mapper, classConstructor, recordsCache, records, asynClient);
}
/**
 * Builds an Aerospike Statement targeting the given namespace and set.
 *
 * @param namespace the namespace
 * @param set       the set
 * @return a Statement with namespace and set populated
 */
private Statement getStatement(String namespace, String set) {
    Statement statement = new Statement();
    statement.setNamespace(namespace);
    statement.setSetName(set);
    return statement;
}
/**
 * Runs the server-side UDF (PACKAGE_NAME.FUNC_NAME) over the whole namespace,
 * passing the cutoff as an offset in seconds from now, and blocks until the
 * execute task completes.
 *
 * @param expireTimeMillis absolute cutoff time in epoch milliseconds
 */
public void cleanExpiredDocumentsBefore(long expireTimeMillis) {
    // The UDF receives the cutoff as seconds relative to the current time.
    long remainingMillis = expireTimeMillis - System.currentTimeMillis();
    Value cutoffSeconds = Value.get((int) TimeUnit.MILLISECONDS.toSeconds(remainingMillis));
    Statement statement = new Statement();
    statement.setNamespace(namespace);
    ExecuteTask task = client.execute(null, statement, PACKAGE_NAME, FUNC_NAME, cutoffSeconds);
    task.waitTillComplete(SLEEP_INTERVAL, TIMEOUT);
}
}
// NOTE(review): extraction fragment — the enclosing method starts before this
// view and the nested braces below are never closed here, so the true nesting
// cannot be confirmed. Comments describe only the visible calls.
// First pass: accumulate the wire-size estimate of each optional Statement
// field (UTF-8 payload + FIELD_HEADER_SIZE) and count the fields present, so
// the command buffer can be sized before the write pass.
if (statement.getNamespace() != null) {
    dataOffset += Buffer.estimateSizeUtf8(statement.getNamespace()) + FIELD_HEADER_SIZE;
    fieldCount++;
    if (statement.getIndexName() != null) {
        dataOffset += Buffer.estimateSizeUtf8(statement.getIndexName()) + FIELD_HEADER_SIZE;
        fieldCount++;
        if (statement.getSetName() != null) {
            dataOffset += Buffer.estimateSizeUtf8(statement.getSetName()) + FIELD_HEADER_SIZE;
            fieldCount++;
            Filter filter = statement.getFilter();
            String[] binNames = statement.getBinNames();
            PredExp[] predExp = statement.getPredExp();
            int predSize = 0;
            // A UDF invocation adds a 1-byte udf-type field plus package-name
            // and function-name fields.
            if (statement.getFunctionName() != null) {
                dataOffset += FIELD_HEADER_SIZE + 1; // udf type
                dataOffset += Buffer.estimateSizeUtf8(statement.getPackageName()) + FIELD_HEADER_SIZE;
                dataOffset += Buffer.estimateSizeUtf8(statement.getFunctionName()) + FIELD_HEADER_SIZE;
                if (statement.getFunctionArgs().length > 0) {
                    functionArgBuffer = Packer.pack(statement.getFunctionArgs());
                    // NOTE(review): the write pass of the original method
                    // appears to begin here — TODO confirm against full file.
                    if (statement.getNamespace() != null) {
                        writeField(statement.getNamespace(), FieldType.NAMESPACE);
// NOTE(review): extraction fragment spanning several methods — headers and
// opening braces are outside this view; indentation below is best-effort and
// the braces do not balance here.
else
    recordSet = this.client.query(queryPolicy, stmt);
return new KeyRecordIterator(stmt.getNamespace(), recordSet);
// --- primary-key path: a KeyQualifier allows a direct get/getHeader on the
// key instead of running a query.
Key key = kq.makeKey(stmt.getNamespace(), stmt.getSetName());
Record record = null;
if (metaOnly)
    record = this.client.getHeader(null, key);
else
    record = this.client.get(null, key, stmt.getBinNames());
if (record == null) {
    return new KeyRecordIterator(stmt.getNamespace());
} else {
    KeyRecord keyRecord = new KeyRecord(key, record);
    return new KeyRecordIterator(stmt.getNamespace(), keyRecord);
// --- filter-promotion path: install the first qualifier expressible as a
// secondary-index Filter onto the Statement.
Filter filter = q == null ? null : q.asFilter();
if (filter != null) {
    stmt.setFilter(filter);
    q.asFilter(true);; // NOTE(review): stray second semicolon kept verbatim
    break;
Filter filter = qualifier.asFilter();
if (filter != null) {
    stmt.setFilter(filter);
    // Nulled out presumably so the same qualifier is not re-applied in the
    // predexp pass below — TODO confirm.
    qualifiers[i] = null;
    break;
predexps = buildPredExp(qualifiers).toArray(new PredExp[0]);
/** * inserts a record, with a time to live, using a Statement and KeyQualifier. If the record exists, and exception will be thrown. * * @param stmt A Statement object containing Namespace and Set * @param keyQualifier KeyQualifier containin the primary key * @param bins A list of Bins to insert * @param ttl The record time to live in seconds */ public void insert(Statement stmt, KeyQualifier keyQualifier, List<Bin> bins, int ttl) { Key key = keyQualifier.makeKey(stmt.getNamespace(), stmt.getSetName()); // Key key = new Key(stmt.getNamespace(), stmt.getSetName(), keyQualifier.getValue1()); this.client.put(this.insertPolicy, key, bins.toArray(new Bin[0])); }
// NOTE(review): fragment — the enclosing method header and surrounding
// control flow are outside this view.
// Fall back to the default write policy (presumably when the caller passed
// null — TODO confirm the guard above).
policy = writePolicyDefault;
// Attach the aggregation UDF to the statement, then prepare it
// (`prepare(false)` — flag semantics not visible here; TODO confirm).
statement.setAggregateFunction(packageName, functionName, functionArgs);
statement.prepare(false);
// NOTE(review): fragment of a constructor body — the class header, parameter
// list, and remainder are outside this view.
super(eventLoop, cluster);
this.listener = listener;
// Prepare the statement (`prepare(true)` — flag semantics not visible here;
// TODO confirm), then validate/launch execution across up to
// policy.maxConcurrentNodes nodes for the statement's namespace.
statement.prepare(true);
executeValidate(tasks, policy.maxConcurrentNodes, statement.getNamespace());
/**
 * Fetches the id of the most recent window committed by the given operator of
 * the given application.
 *
 * @param appId      application id the metadata row belongs to
 * @param operatorId operator id the metadata row belongs to
 * @return the last committed window id, or -1 when no matching row exists
 */
@Override
public long getCommittedWindowId(String appId, int operatorId)
{
  try {
    // Bug fix: an Aerospike Statement holds a single filter set, so the
    // second setFilters() call overwrote the first and the operatorId
    // predicate was silently dropped. Filter on operatorId server-side and
    // verify appId client-side so neither predicate is lost.
    lastWindowFetchCommand.setFilters(Filter.equal(metaTableOperatorIdColumn, operatorId));
    long lastWindow = -1;
    RecordSet recordSet = client.query(null, lastWindowFetchCommand);
    try {
      while (recordSet.next()) {
        Object recordAppId = recordSet.getRecord().getValue(metaTableAppIdColumn);
        if (recordAppId != null && appId.equals(recordAppId.toString())) {
          lastWindow = Long.parseLong(recordSet.getRecord().getValue(metaTableWindowColumn).toString());
        }
      }
    }
    finally {
      // RecordSet holds client/connection resources; always release them.
      recordSet.close();
    }
    return lastWindow;
  }
  catch (AerospikeException ex) {
    throw new RuntimeException(ex);
  }
}
/**
 * Execute query, apply statement's aggregation function, and return result iterator. The query
 * executor puts results on a queue in separate threads. The calling thread concurrently pops
 * results off the queue through the result iterator.
 * <p>
 * The aggregation function is called on both server and client (final reduce), so the Lua
 * script files must reside on both server and client. The package name locates the udf file:
 * udf file = &lt;udf dir&gt;/&lt;package name&gt;.lua
 *
 * @param policy       generic configuration parameters, pass in null for defaults
 * @param statement    database query command
 * @param packageName  server package where user defined function resides
 * @param functionName aggregation function name
 * @param functionArgs arguments to pass to function name, if any
 * @return result iterator
 * @throws AerospikeException if query fails
 */
public final ResultSet queryAggregate(
    QueryPolicy policy,
    Statement statement,
    String packageName,
    String functionName,
    Value... functionArgs
) throws AerospikeException {
    // Attach the UDF to the statement, then delegate to the plain overload.
    statement.setAggregateFunction(packageName, functionName, functionArgs);
    return queryAggregate(policy, statement);
}
/**
 * Creates an Aerospike Statement pre-populated with the given namespace and
 * set name.
 *
 * @param namespace the namespace
 * @param set       the set
 * @return the populated statement
 */
private Statement getStatement(String namespace, String set) {
    Statement queryStatement = new Statement();
    queryStatement.setNamespace(namespace);
    queryStatement.setSetName(set);
    return queryStatement;
}
// NOTE(review): extraction fragment — the enclosing method starts before this
// view and the nested braces below are never closed here, so the true nesting
// cannot be confirmed. Comments describe only the visible calls.
// First pass: accumulate the wire-size estimate of each optional Statement
// field (UTF-8 payload + FIELD_HEADER_SIZE) and count the fields present, so
// the command buffer can be sized before the write pass.
if (statement.getNamespace() != null) {
    dataOffset += Buffer.estimateSizeUtf8(statement.getNamespace()) + FIELD_HEADER_SIZE;
    fieldCount++;
    if (statement.getIndexName() != null) {
        dataOffset += Buffer.estimateSizeUtf8(statement.getIndexName()) + FIELD_HEADER_SIZE;
        fieldCount++;
        if (statement.getSetName() != null) {
            dataOffset += Buffer.estimateSizeUtf8(statement.getSetName()) + FIELD_HEADER_SIZE;
            fieldCount++;
            Filter filter = statement.getFilter();
            String[] binNames = statement.getBinNames();
            PredExp[] predExp = statement.getPredExp();
            int predSize = 0;
            // A UDF invocation adds a 1-byte udf-type field plus package-name
            // and function-name fields.
            if (statement.getFunctionName() != null) {
                dataOffset += FIELD_HEADER_SIZE + 1; // udf type
                dataOffset += Buffer.estimateSizeUtf8(statement.getPackageName()) + FIELD_HEADER_SIZE;
                dataOffset += Buffer.estimateSizeUtf8(statement.getFunctionName()) + FIELD_HEADER_SIZE;
                if (statement.getFunctionArgs().length > 0) {
                    functionArgBuffer = Packer.pack(statement.getFunctionArgs());
                    // NOTE(review): the write pass of the original method
                    // appears to begin here — TODO confirm against full file.
                    if (statement.getNamespace() != null) {
                        writeField(statement.getNamespace(), FieldType.NAMESPACE);
/**
 * Reports whether the qualifier's field has a cached secondary index that its
 * operation can use (equality and range comparisons only).
 *
 * @param stmt      statement supplying namespace and set for the cache lookup
 * @param qualifier predicate whose field and operation are checked
 * @return true when an index exists and the operation is index-compatible
 */
protected boolean isIndexedBin(Statement stmt, Qualifier qualifier) {
    String field = qualifier.getField();
    if (field == null) {
        return false;
    }
    // The index cache is keyed by "namespace:set:bin".
    String cacheKey = String.join(":", stmt.getNamespace(), stmt.getSetName(), field);
    if (this.indexCache.get(cacheKey) == null) {
        return false;
    }
    switch (qualifier.getOperation()) {
        case EQ:
        case BETWEEN:
        case GT:
        case GTEQ:
        case LT:
        case LTEQ:
            return true;
        default:
            return false;
    }
}
// NOTE(review): fragment — the enclosing method header and surrounding
// control flow are outside this view.
// Fall back to the default write policy (presumably when the caller passed
// null — TODO confirm the guard above).
policy = writePolicyDefault;
// Attach the aggregation UDF to the statement, then prepare it
// (`prepare(false)` — flag semantics not visible here; TODO confirm).
statement.setAggregateFunction(packageName, functionName, functionArgs);
statement.prepare(false);
// NOTE(review): fragment of a constructor body — the class header, parameter
// list, and remainder are outside this view.
super(eventLoop, cluster);
this.listener = listener;
// Prepare the statement (`prepare(true)` — flag semantics not visible here;
// TODO confirm), then validate/launch execution across up to
// policy.maxConcurrentNodes nodes for the statement's namespace.
statement.prepare(true);
executeValidate(tasks, policy.maxConcurrentNodes, statement.getNamespace());
/**
 * Fetches the id of the most recent window committed by the given operator of
 * the given application.
 *
 * @param appId      application id the metadata row belongs to
 * @param operatorId operator id the metadata row belongs to
 * @return the last committed window id, or -1 when no matching row exists
 */
@Override
public long getCommittedWindowId(String appId, int operatorId)
{
  try {
    // Bug fix: an Aerospike Statement holds a single filter set, so the
    // second setFilters() call overwrote the first and the operatorId
    // predicate was silently dropped. Filter on operatorId server-side and
    // verify appId client-side so neither predicate is lost.
    lastWindowFetchCommand.setFilters(Filter.equal(metaTableOperatorIdColumn, operatorId));
    long lastWindow = -1;
    RecordSet recordSet = client.query(null, lastWindowFetchCommand);
    try {
      while (recordSet.next()) {
        Object recordAppId = recordSet.getRecord().getValue(metaTableAppIdColumn);
        if (recordAppId != null && appId.equals(recordAppId.toString())) {
          lastWindow = Long.parseLong(recordSet.getRecord().getValue(metaTableWindowColumn).toString());
        }
      }
    }
    finally {
      // RecordSet holds client/connection resources; always release them.
      recordSet.close();
    }
    return lastWindow;
  }
  catch (AerospikeException ex) {
    throw new RuntimeException(ex);
  }
}
/**
 * Execute query, apply statement's aggregation function, and return result iterator. The query
 * executor puts results on a queue in separate threads. The calling thread concurrently pops
 * results off the queue through the result iterator.
 * <p>
 * The aggregation function is called on both server and client (final reduce), so the Lua
 * script files must reside on both server and client. The package name locates the udf file:
 * udf file = &lt;udf dir&gt;/&lt;package name&gt;.lua
 *
 * @param policy       generic configuration parameters, pass in null for defaults
 * @param statement    database query command
 * @param packageName  server package where user defined function resides
 * @param functionName aggregation function name
 * @param functionArgs arguments to pass to function name, if any
 * @return result iterator
 * @throws AerospikeException if query fails
 */
public final ResultSet queryAggregate(
    QueryPolicy policy,
    Statement statement,
    String packageName,
    String functionName,
    Value... functionArgs
) throws AerospikeException {
    // Attach the UDF to the statement, then delegate to the plain overload.
    statement.setAggregateFunction(packageName, functionName, functionArgs);
    return queryAggregate(policy, statement);
}
/**
 * Select records matching an optional Aerospike Filter and any number of
 * Qualifiers.
 *
 * @param namespace  Namespace storing the data
 * @param set        Set storing the data
 * @param filter     Aerospike Filter to apply, or null for none
 * @param sortMap    <STRONG>NOT IMPLEMENTED</STRONG>
 * @param qualifiers Zero or more Qualifiers restricting the results
 * @return A KeyRecordIterator to iterate over the results
 */
public KeyRecordIterator select(String namespace, String set, Filter filter, Map<String, String> sortMap, Qualifier... qualifiers) {
    Statement statement = new Statement();
    statement.setNamespace(namespace);
    statement.setSetName(set);
    if (filter != null) {
        statement.setFilter(filter);
    }
    return select(statement, sortMap, qualifiers);
}