/** Returns {@code true} when this collection holds no rows, i.e. {@link #size()} is zero. */
public boolean isEmpty() { return size() == 0; }
/** Returns {@code true} when this collection holds no rows, i.e. {@link #size()} is zero. */
public boolean isEmpty() { return size() == 0; }
/** Returns {@code true} when this collection holds no rows, i.e. {@link #size()} is zero. */
public boolean isEmpty() { return size() == 0; }
/** Returns {@code true} when this collection holds no rows, i.e. {@link #size()} is zero. */
public boolean isEmpty() { return size() == 0; }
/**
 * Loads every user-defined type stored in the system schema table for the keyspace
 * identified by {@code row}, keyed by the type's name.
 *
 * @param row the schema partition to resultify into user-type rows
 * @return a map from type name to the deserialized {@link UserType}
 */
public static Map<ByteBuffer, UserType> fromSchema(Row row)
{
    UntypedResultSet typeRows =
        QueryProcessor.resultify("SELECT * FROM system." + SystemKeyspace.SCHEMA_USER_TYPES_CF, row);

    // Presized: one entry per schema row.
    Map<ByteBuffer, UserType> typesByName = new HashMap<>(typeRows.size());
    for (UntypedResultSet.Row typeRow : typeRows)
    {
        UserType userType = fromSchema(typeRow);
        typesByName.put(userType.name, userType);
    }
    return typesByName;
}
private void replayAllFailedBatches() throws ExecutionException, InterruptedException { logger.debug("Started replayAllFailedBatches"); // rate limit is in bytes per second. Uses Double.MAX_VALUE if disabled (set to 0 in cassandra.yaml). // max rate is scaled by the number of nodes in the cluster (same as for HHOM - see CASSANDRA-5272). int throttleInKB = DatabaseDescriptor.getBatchlogReplayThrottleInKB() / StorageService.instance.getTokenMetadata().getAllEndpoints().size(); RateLimiter rateLimiter = RateLimiter.create(throttleInKB == 0 ? Double.MAX_VALUE : throttleInKB * 1024); UntypedResultSet page = executeInternal(String.format("SELECT id, data, written_at, version FROM %s.%s LIMIT %d", Keyspace.SYSTEM_KS, SystemKeyspace.BATCHLOG_CF, PAGE_SIZE)); while (!page.isEmpty()) { UUID id = processBatchlogPage(page, rateLimiter); if (page.size() < PAGE_SIZE) break; // we've exhausted the batchlog, next query would be empty. page = executeInternal(String.format("SELECT id, data, written_at, version FROM %s.%s WHERE token(id) > token(?) LIMIT %d", Keyspace.SYSTEM_KS, SystemKeyspace.BATCHLOG_CF, PAGE_SIZE), id); } cleanup(); logger.debug("Finished replayAllFailedBatches"); }
// NOTE(review): this definition appears TRUNCATED in the source — the method header
// is missing its opening brace and the body is cut off after the first statement.
// Restore from the original file before relying on it; left byte-identical here.
// Presumably it iterates the page's rows into `batches` and returns the last id seen
// (used as the paging token by the caller) — TODO confirm against the full source.
private UUID processBatchlogPage(UntypedResultSet page, RateLimiter rateLimiter) ArrayList<Batch> batches = new ArrayList<>(page.size());
/**
 * Checks whether a document with the given elastic id exists as a row in the
 * backing Cassandra table for {@code type}.
 *
 * @param indexService index whose keyspace/mapping is consulted
 * @param type         elasticsearch document type (mapped to a column family name)
 * @param id           elastic document id, parsed into a Cassandra primary key
 * @return {@code true} if the existence query returns at least one row
 * @throws InvalidRequestException   if the generated query is invalid
 * @throws RequestExecutionException if the query fails to execute
 * @throws RequestValidationException if the request does not validate
 * @throws IOException               if id parsing fails
 */
public boolean rowExists(final IndexService indexService, final String type, final String id)
        throws InvalidRequestException, RequestExecutionException, RequestValidationException, IOException
{
    DocPrimaryKey primaryKey = parseElasticId(indexService, type, id);
    String existsQuery = buildExistsQuery(indexService.mapperService().documentMapper(type),
                                          indexService.keyspace(),
                                          typeToCfName(type),
                                          id);
    UntypedResultSet rows = process(ConsistencyLevel.LOCAL_ONE, existsQuery, primaryKey.values);
    return rows.size() > 0;
}