private static Collection<IColumn> getFieldCacheEntries(IndexReader indexReader, String field) throws IOException
{
    String indexName = SolandraCoreContainer.coreInfo.get().indexName + "~" + SolandraCoreContainer.coreInfo.get().shard;
    byte[] indexNameBytes = indexName.getBytes("UTF-8");

    if (logger.isDebugEnabled())
        logger.debug("Loading field cache from " + indexName + " " + field);

    ColumnParent fieldCacheParent = new ColumnParent(CassandraUtils.fieldCacheColumnFamily);
    ByteBuffer fieldCacheKey = CassandraUtils.hashKeyBytes(indexNameBytes, CassandraUtils.delimeterBytes,
            field.getBytes("UTF-8")); // explicit charset rather than the platform default

    // Read the entire row for this field's cache key (empty start/finish, no limit).
    List<Row> rows = CassandraUtils.robustRead(CassandraUtils.consistency, new SliceFromReadCommand(
            CassandraUtils.keySpace, fieldCacheKey, fieldCacheParent, ByteBufferUtil.EMPTY_BYTE_BUFFER,
            ByteBufferUtil.EMPTY_BYTE_BUFFER, false, Integer.MAX_VALUE));

    if (rows.isEmpty())
        return Collections.emptyList();

    Row row = rows.get(0);

    if (row.cf == null)
        return Collections.emptyList();

    return row.cf.getSortedColumns();
}
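A hedged consumer sketch for the method above. The returned columns presumably pair a document id (column name) with the cached value (column value), matching how the delete path below reads `col.name()`; `reader` and the `"price"` field are made-up stand-ins.

// Hypothetical usage: iterate the cached entries for one field.
// Assumes the thrift-era IColumn API where name()/value() return ByteBuffers.
Collection<IColumn> entries = getFieldCacheEntries(reader, "price");
for (IColumn col : entries)
{
    ByteBuffer docId = col.name();   // column name presumably encodes the document id
    ByteBuffer value = col.value();  // column value holds the cached field value
    // decode and populate the in-memory field cache here
}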
public ReadCommand copy()
{
    return new SliceFromReadCommand(ksName, key, cfName, timestamp, filter).setIsDigestQuery(isDigestQuery());
}
public ReadCommand deserialize(DataInput in, int version) throws IOException
{
    boolean isDigest = in.readBoolean();
    String keyspaceName = in.readUTF();
    ByteBuffer key = ByteBufferUtil.readWithShortLength(in);
    String cfName = in.readUTF();
    long timestamp = in.readLong();

    CFMetaData metadata = Schema.instance.getCFMetaData(keyspaceName, cfName);
    if (metadata == null)
    {
        String message = String.format("Got slice command for nonexistent table %s.%s. If the table was just " +
                "created, this is likely due to the schema not being fully propagated. Please wait for schema " +
                "agreement on table creation.", keyspaceName, cfName);
        throw new UnknownColumnFamilyException(message, null);
    }

    SliceQueryFilter filter = metadata.comparator.sliceQueryFilterSerializer().deserialize(in, version);
    return new SliceFromReadCommand(keyspaceName, key, cfName, timestamp, filter).setIsDigestQuery(isDigest);
}
protected boolean containsPreviousLast(Row first)
{
    if (lastReturned == null)
        return false;

    Cell firstCell = isReversed() ? lastCell(first.cf) : firstNonStaticCell(first.cf);

    // If the row contained only static columns, it has already been returned and we can skip it.
    if (firstCell == null)
        return true;

    CFMetaData metadata = Schema.instance.getCFMetaData(command.getKeyspace(), command.getColumnFamilyName());

    // Note: we only return true if the cell is the lastReturned *and* it is live. If it is deleted, it is ignored
    // by the rest of the paging code (in particular, it hasn't been counted as live) and we want to act as if it
    // wasn't there.
    return !first.cf.deletionInfo().isDeleted(firstCell)
            && firstCell.isLive(timestamp())
            && firstCell.name().isSameCQL3RowAs(metadata.comparator, lastReturned);
}
@Override
public Row maybeTrim(Row row)
{
    if ((row == null) || (row.cf == null))
        return row;

    return new Row(row.key, filter.trim(row.cf, getOriginalRequestedCount(), timestamp));
}
public void serialize(ReadCommand rm, DataOutputPlus out, int version) throws IOException
{
    SliceFromReadCommand realRM = (SliceFromReadCommand) rm;
    out.writeBoolean(realRM.isDigestQuery());
    out.writeUTF(realRM.ksName);
    ByteBufferUtil.writeWithShortLength(realRM.key, out);
    out.writeUTF(realRM.cfName);
    out.writeLong(realRM.timestamp);

    CFMetaData metadata = Schema.instance.getCFMetaData(realRM.ksName, realRM.cfName);
    metadata.comparator.sliceQueryFilterSerializer().serialize(realRM.filter, out, version);
}
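The serialize/deserialize pair above must stay field-for-field symmetric: digest flag, keyspace, key, cf name, timestamp, then the filter. A minimal round-trip sketch, assuming Cassandra's DataOutputBuffer plus java.io streams; `serializer`, `cmd`, and `version` are hypothetical stand-ins.

// Hypothetical round-trip check: whatever serialize() writes,
// deserialize() must read back in the same order.
DataOutputBuffer out = new DataOutputBuffer();
serializer.serialize(cmd, out, version);

DataInput in = new DataInputStream(new ByteArrayInputStream(out.getData(), 0, out.getLength()));
ReadCommand roundTripped = serializer.deserialize(in, version);

assert roundTripped.isDigestQuery() == cmd.isDigestQuery();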
@Override
public ReadCommand maybeGenerateRetryCommand(RowDataResolver resolver, Row row)
{
    int maxLiveColumns = resolver.getMaxLiveCount();
    int count = filter.count;

    // We generate a retry if at least one node replied with count live columns but after merge we have fewer
    // than the total number of columns we are interested in (which may be < count on a retry).
    // So in particular, if no host returned count live columns, we know it's not a short read.
    if (maxLiveColumns < count)
        return null;

    int liveCountInRow = row == null || row.cf == null ? 0 : filter.getLiveCount(row.cf, timestamp);
    if (liveCountInRow < getOriginalRequestedCount())
    {
        // We asked for t (= count) live columns and got l (= liveCountInRow) of them.
        // From that, we can estimate that on this row, of x requested columns,
        // only x * (l/t) end up live after reconciliation. So for the next round
        // we want to ask for x columns such that x * (l/t) == t, i.e. x = t^2/l.
        int retryCount = liveCountInRow == 0 ? count + 1 : ((count * count) / liveCountInRow) + 1;
        SliceQueryFilter newFilter = filter.withUpdatedCount(retryCount);
        return new RetriedSliceFromReadCommand(ksName, key, cfName, timestamp, newFilter, getOriginalRequestedCount());
    }

    return null;
}
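A worked instance of the x = t^2/l estimate above, with hypothetical numbers; this is pure arithmetic and runs standalone.

// Worked example of the short-read retry estimate: we asked for t = 100
// live columns but only l = 40 survived reconciliation, so we expect to
// need roughly 100 / (40/100) = 250 columns (+1) on the retry.
public class RetryCountExample
{
    public static void main(String[] args)
    {
        int count = 100;          // t: columns requested in the last round
        int liveCountInRow = 40;  // l: columns still live after merging replicas

        int retryCount = liveCountInRow == 0
                       ? count + 1
                       : ((count * count) / liveCountInRow) + 1;

        System.out.println(retryCount); // prints 251
    }
}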
List<Row> rows = CassandraUtils.robustRead(CassandraUtils.consistency, new SliceFromReadCommand(
        CassandraUtils.keySpace, termsListKey, fieldColumnFamily,
        CassandraUtils.createColumnName(startTerm), ByteBufferUtil.EMPTY_BYTE_BUFFER, false, bufferSize));

logger.debug("scanning row: " + ByteBufferUtil.string(rowKey));

reads.add((ReadCommand) new SliceFromReadCommand(CassandraUtils.keySpace, rowKey, columnParent,
        ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.EMPTY_BYTE_BUFFER, false, Integer.MAX_VALUE));
ReadCommand cmd = new SliceFromReadCommand(CassandraUtils.keySpace, key,
        new ColumnParent(CassandraUtils.schemaInfoColumnFamily), startCol,
        ByteBufferUtil.EMPTY_BYTE_BUFFER, false, pageSize);
"shards".getBytes("UTF-8")); ReadCommand cmd = new SliceFromReadCommand(CassandraUtils.keySpace, key, new ColumnParent( CassandraUtils.schemaInfoColumnFamily), ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.EMPTY_BYTE_BUFFER, false, Integer.MAX_VALUE);
public void deleteDocuments(String indexName, Term term, boolean autoCommit) throws CorruptIndexException, IOException
{
    ColumnParent cp = new ColumnParent(CassandraUtils.termVecColumnFamily);
    ByteBuffer key = CassandraUtils.hashKeyBytes(indexName.getBytes("UTF-8"), CassandraUtils.delimeterBytes,
            term.field().getBytes("UTF-8"), CassandraUtils.delimeterBytes, term.text().getBytes("UTF-8"));

    // Read the full term-vector row for this term; each column name encodes a document id.
    ReadCommand rc = new SliceFromReadCommand(CassandraUtils.keySpace, key, cp, ByteBufferUtil.EMPTY_BYTE_BUFFER,
            ByteBufferUtil.EMPTY_BYTE_BUFFER, false, Integer.MAX_VALUE);

    List<Row> rows = CassandraUtils.robustRead(CassandraUtils.consistency, rc);

    // delete by documentId
    for (Row row : rows)
    {
        if (row.cf != null)
        {
            Collection<IColumn> columns = row.cf.getSortedColumns();

            for (IColumn col : columns)
            {
                deleteLucandraDocument(indexName, CassandraUtils.readVInt(col.name()), autoCommit);
            }
        }
    }
}
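A hypothetical call to the method above, assuming a writer-style instance (here `writer`) that exposes it; the index name and term are made up, and Term is Lucene's (field, text) term.

// Hypothetical usage: remove every document in the "wikipedia" index that
// was indexed with the term user:bob, committing immediately.
writer.deleteDocuments("wikipedia", new Term("user", "bob"), true);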
readCommands.add(new SliceFromReadCommand(CassandraUtils.keySpace, key, columnParent,
        ByteBufferUtil.EMPTY_BYTE_BUFFER, CassandraUtils.finalTokenBytes, false, Integer.MAX_VALUE));
SliceQueryFilter sqf = new SliceQueryFilter(startComposite, endComposite, false,
        query.getLimit() + (query.hasLimit() ? 1 : 0));
ReadCommand sliceCmd = new SliceFromReadCommand(keyspace, query.getKey().asByteBuffer(), columnFamily,
        nowMillis, sqf);
public SliceFromReadCommand withUpdatedFilter(SliceQueryFilter newFilter)
{
    return new SliceFromReadCommand(ksName, key, cfName, timestamp, newFilter);
}
public static ReadCommand create(String ksName, ByteBuffer key, String cfName, long timestamp, IDiskAtomFilter filter)
{
    if (filter instanceof SliceQueryFilter)
        return new SliceFromReadCommand(ksName, key, cfName, timestamp, (SliceQueryFilter) filter);
    else
        return new SliceByNamesReadCommand(ksName, key, cfName, timestamp, (NamesQueryFilter) filter);
}
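A hedged usage sketch of the factory above, which dispatches on the filter's concrete type so callers stay filter-agnostic. It assumes the factory lives on ReadCommand; the keyspace, table, and key are made-up values.

// Hypothetical usage: a SliceQueryFilter (IdentityQueryFilter is one, as the
// pager snippet below shows) yields a SliceFromReadCommand from the factory.
IDiskAtomFilter filter = new IdentityQueryFilter();
ReadCommand cmd = ReadCommand.create("ks1", ByteBufferUtil.bytes("key1"), "users",
                                     System.currentTimeMillis(), filter);
assert cmd instanceof SliceFromReadCommand;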
SliceFromReadCommand command = new SliceFromReadCommand(cfs.metadata.ksName, key, cfs.name,
        System.currentTimeMillis(), new IdentityQueryFilter());
final SliceQueryPager pager = new SliceQueryPager(command, null, null, true);
/**
 * Convenience method that counts (live) cells/rows for a given slice of a row, paging underneath.
 */
public static int countPaged(String keyspace,
                             String columnFamily,
                             ByteBuffer key,
                             SliceQueryFilter filter,
                             ConsistencyLevel consistencyLevel,
                             ClientState cState,
                             final int pageSize,
                             long now) throws RequestValidationException, RequestExecutionException
{
    SliceFromReadCommand command = new SliceFromReadCommand(keyspace, key, columnFamily, now, filter);
    final SliceQueryPager pager = new SliceQueryPager(command, consistencyLevel, cState, false);

    ColumnCounter counter = filter.columnCounter(Schema.instance.getCFMetaData(keyspace, columnFamily).comparator, now);
    while (!pager.isExhausted())
    {
        List<Row> next = pager.fetchPage(pageSize);
        if (!next.isEmpty())
            counter.countAll(next.get(0).cf);
    }
    return counter.live();
}
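A hedged call sketch for countPaged, assuming it lives in Cassandra's QueryPagers; the keyspace, table, key, and page size are hypothetical, and it needs a running node since the pager issues real reads.

// Hypothetical usage: count live cells in one partition, fetching 1024 cells
// per page instead of materializing the whole wide row at once.
int live = QueryPagers.countPaged("ks1", "wide_rows",
                                  ByteBufferUtil.bytes("partition-key"),
                                  filter,                       // a SliceQueryFilter
                                  ConsistencyLevel.QUORUM,
                                  ClientState.forInternalCalls(),
                                  1024,
                                  System.currentTimeMillis());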
ReadCommand readCommand = new SliceFromReadCommand(ks, rKey, new ColumnParent(cf), sname, ename, reversed, count);
readCommand.setDigestQuery(false);
commands.add(readCommand);
long now = System.currentTimeMillis();
for (ByteBuffer key : partitionKeys)
    commands.add(new SliceFromReadCommand(keyspace(), key, columnFamily(),