Refine search
int pageSize, long nowMillis) throws BackendException { IPartitioner partitioner = StorageService.getPartitioner(); RowPosition startPosition = start.minKeyBound(partitioner); RowPosition endPosition = end.minKeyBound(partitioner);
OAuthService service = new ServiceBuilder() .provider(TwitterApi.class) .apiKey("your_api_key") .apiSecret("your_api_secret") .build(); ... Token requestToken = service.getRequestToken(); String your_token = requestToken.getToken(); ... Verifier verifier = new Verifier("your_previously_retrieved_verifier"); Token accessToken = service.getAccessToken(requestToken, verifier);
/**
 * Returns the token of the local node, rendered as a string.
 * In client mode the process is not part of the ring, so a placeholder token
 * is returned instead of a real one.
 */
public String getToken()
{
    if (StorageService.instance.isClientMode())
        return CassandraUtils.fakeToken;

    return StorageService.instance
                         .getTokenMetadata()
                         .getToken(FBUtilities.getLocalAddress())
                         .toString();
}
/**
 * Builds the partition-position bounds implied by token restrictions on the partition key.
 *
 * @param p       the partitioner used to resolve the token bounds
 * @param options query options supplying any bound marker values
 * @return the bounds to query, or {@code null} when the restriction denotes a provably
 *         empty range
 */
private AbstractBounds<PartitionPosition> getPartitionKeyBoundsForTokenRestrictions(IPartitioner p, QueryOptions options)
{
    Token startToken = getTokenBound(Bound.START, options, p);
    Token endToken = getTokenBound(Bound.END, options, p);
    boolean includeStart = partitionKeyRestrictions.isInclusive(Bound.START);
    boolean includeEnd = partitionKeyRestrictions.isInclusive(Bound.END);

    /*
     * If we ask SP.getRangeSlice() for (token(200), token(200)], it will happily return the whole ring.
     * However, wrapping range doesn't really make sense for CQL, and we want to return an empty result in that
     * case (CASSANDRA-5573). So special case to detect a range that is guaranteed to be empty.
     *
     * In practice, we want to return an empty result set if either startToken > endToken, or both are equal but
     * at least one bound is excluded (since [a, a] can contain something, but not (a, a], [a, a) or (a, a)).
     * Note though that when startToken or endToken is the minimum token, this special-case
     * rule must not apply.
     */
    if (!startToken.isMinimum() && !endToken.isMinimum())
    {
        int cmp = startToken.compareTo(endToken);
        boolean provablyEmpty = cmp > 0 || (cmp == 0 && !(includeStart && includeEnd));
        if (provablyEmpty)
            return null;
    }

    PartitionPosition left = includeStart ? startToken.minKeyBound() : startToken.maxKeyBound();
    PartitionPosition right = includeEnd ? endToken.maxKeyBound() : endToken.minKeyBound();
    return new Range<>(left, right);
}
/**
 * Compute the bounds of keys corresponding to a given bounds of tokens:
 * the lower key bound of the left token through the upper key bound of the right.
 */
public static Bounds<PartitionPosition> makeRowBounds(Token left, Token right)
{
    PartitionPosition lowerBound = left.minKeyBound();
    PartitionPosition upperBound = right.maxKeyBound();
    return new Bounds<PartitionPosition>(lowerBound, upperBound);
}
Token.TokenFactory tf = getTokenFactory(); ? getRangeToAddressMapInLocalDC(keyspace) : getRangeToAddressMap(keyspace); TokenRange tr = new TokenRange(tf.toString(range.left.getToken()), tf.toString(range.right.getToken()), endpoints) .setEndpoint_details(epDetails) .setRpc_endpoints(rpc_endpoints);
public KeyBound maxKeyBound()
{
    /*
     * For each token, we need both minKeyBound and maxKeyBound
     * because a token corresponds to a range of keys. But the minimum
     * token corresponds to no key, so it is valid and actually much
     * simpler to associate the same value for minKeyBound and
     * maxKeyBound for the minimum token.
     */
    if (isMinimum())
        return minKeyBound();
    return new KeyBound(this, false);
}
/**
 * Reads a RowPosition: a one-byte Kind ordinal, followed by either a
 * short-length-prefixed row key (ROW_KEY) or a serialized Token, which is then
 * turned into its min or max key bound depending on the kind.
 *
 * @throws IOException on any read failure from the underlying input
 */
public RowPosition deserialize(DataInput in) throws IOException
{
    Kind kind = Kind.fromOrdinal(in.readByte());
    if (kind == Kind.ROW_KEY)
        return StorageService.getPartitioner().decorateKey(ByteBufferUtil.readWithShortLength(in));

    Token token = Token.serializer.deserialize(in);
    return (kind == Kind.MIN_BOUND) ? token.minKeyBound() : token.maxKeyBound();
}
throw new org.apache.cassandra.exceptions.InvalidRequestException("start token + end key is not a supported key range"); IPartitioner p = StorageService.getPartitioner(); if (startToken.compareTo(endToken) > 0 && !endToken.isMinimum(p)) RowPosition stop = p.getTokenFactory().fromString(range.end_token).maxKeyBound(p); if (RowPosition.ForKey.get(range.start_key, p).compareTo(stop) > 0 && !stop.isMinimum()) throw new org.apache.cassandra.exceptions.InvalidRequestException("Start key's token sorts after end token");
/**
 * Attempt delivery to any node for which we have hints. Necessary since we can generate hints even for
 * nodes which are never officially down/failed.
 */
private void scheduleAllDeliveries()
{
    logger.debug("Started scheduleAllDeliveries");

    // Force a major compaction to get rid of the tombstones and expired hints. Do it once, before we schedule any
    // individual replay, to avoid N - 1 redundant individual compactions (when N is the number of nodes with hints
    // to deliver to).
    compact();

    IPartitioner p = StorageService.getPartitioner();
    RowPosition minPos = p.getMinimumToken().minKeyBound();
    // A range whose start and end are the same minimum position wraps the whole ring,
    // so this slice visits every partition in the hint store.
    Range<RowPosition> range = new Range<>(minPos, minPos, p);
    // Empty name filter: we only need the partition keys, not any hint columns.
    IDiskAtomFilter filter = new NamesQueryFilter(ImmutableSortedSet.<CellName>of());
    List<Row> rows = hintStore.getRangeSlice(range, null, filter, Integer.MAX_VALUE, System.currentTimeMillis());
    for (Row row : rows)
    {
        // Each hint partition is keyed by the target host's id; map it back to an address.
        UUID hostId = UUIDGen.getUUID(row.key.getKey());
        InetAddress target = StorageService.instance.getTokenMetadata().getEndpointForHostId(hostId);
        // token may have since been removed (in which case we have just read back a tombstone)
        if (target != null)
            scheduleHintDelivery(target, false);
    }
    logger.debug("Finished scheduleAllDeliveries");
}
public CompactionInfo getCompactionInfo() { long rangesCompleted = 0, rangesTotal = 0; Token lastToken = prevToken; // This approximation is not very accurate, but since we do not have a method which allows us to calculate the // percentage of a range covered by a second range, this is the best approximation that we can calculate. // Instead, we just count the total number of ranges that haven't been seen by the node (we use the order of // the tokens to determine whether they have been seen yet or not), and the total number of ranges that a node // has. for (Range<Token> range : StorageService.instance.getLocalRanges(baseCfs.keyspace.getName())) { rangesTotal++; if ((lastToken != null) && lastToken.compareTo(range.right) > 0) rangesCompleted++; } return new CompactionInfo(baseCfs.metadata, OperationType.VIEW_BUILD, rangesCompleted, rangesTotal, Unit.RANGES, compactionId); }
/**
 * Resolves a raw key buffer to a partition position: a real key is decorated by
 * the partitioner, while a null or empty buffer maps to the minimum token's
 * lower key bound (i.e. the very first position in the ring).
 */
public static PartitionPosition get(ByteBuffer key, IPartitioner p)
{
    if (key != null && key.remaining() > 0)
        return p.decorateKey(key);
    return p.getMinimumToken().minKeyBound();
}
}
/**
 * Builds a Lucene numeric range query over the token field for a token range.
 * Token ranges are start-exclusive, so the lower bound is shifted up by one;
 * null stands for an unbounded edge at either extreme of the long token space.
 */
Query newNumericRangesQuery(Range<Token> range)
{
    Long left = (Long) range.left.getTokenValue();
    Long right = (Long) range.right.getTokenValue();

    Long lower = (left == Long.MIN_VALUE) ? null : left + 1;
    Long upper = (right == Long.MAX_VALUE) ? null : right;

    return NumberFieldMapper.NumberType.LONG.rangeQuery(TokenFieldMapper.NAME, lower, upper, true, true, true);
}
/**
 * Accumulates the size of the token span (current, next] onto each natural endpoint
 * that replicates it, using the span's midpoint as the representative token for the
 * endpoint lookup.
 */
static void addOwnership(final TokenMetadata tokenMetadata, final AbstractReplicationStrategy rs, Token current, Token next, Map<InetAddress, Double> ownership)
{
    double size = current.size(next);
    Token representative = current.getPartitioner().midpoint(current, next);
    for (InetAddress endpoint : rs.calculateNaturalEndpoints(representative, tokenMetadata))
    {
        Double previous = ownership.get(endpoint);
        ownership.put(endpoint, previous == null ? size : previous + size);
    }
}
/**
 * Returns the on-wire size of a token: the serialized length field plus the
 * token's byte representation as produced by the partitioner's token factory.
 */
public long serializedSize(Token object, int version)
{
    ByteBuffer bytes = object.getPartitioner().getTokenFactory().toByteArray(object);
    int length = bytes.remaining();
    return TypeSizes.sizeof(length) + length;
}
}
/** A position is the ring minimum exactly when its token is the minimum token. */
public boolean isMinimum()
{
    boolean tokenIsMinimum = getToken().isMinimum();
    return tokenIsMinimum;
}
/**
 * Compute a range of keys corresponding to a given range of token.
 *
 * Both edges use maxKeyBound(): a Range is start-exclusive (left, right], so keys
 * owned by the left token itself must fall outside the range — hence the left edge
 * is the upper key bound of the left token, and the right edge is the upper key
 * bound of the right token.
 */
public static Range<PartitionPosition> makeRowRange(Token left, Token right)
{
    return new Range<PartitionPosition>(left.maxKeyBound(), right.maxKeyBound());
}
// Convenience overload: delegates to the partitioner-aware variant using the
// globally configured partitioner.
public KeyBound maxKeyBound()
{
    return maxKeyBound(StorageService.getPartitioner());
}
/** Convenience overload: checks minimality against the globally configured partitioner. */
public boolean isMinimum()
{
    boolean minimum = isMinimum(StorageService.getPartitioner());
    return minimum;
}