/**
 * Returns the {@link KeyRange}s of the Cassandra ring that have at least one replica on this
 * local node, expressed as key ranges rather than token ranges.
 *
 * Requires an {@code AbstractByteOrderedPartitioner}: only a byte-ordered partitioner lets a
 * token string be mapped back onto a key, which {@code CassandraHelper.transformRange} relies
 * on below. Any other partitioner yields an {@link UnsupportedOperationException}.
 *
 * @return the key ranges replicated on this machine (possibly empty)
 * @throws BackendException if the Thrift call or connection-pool interaction fails
 */
@Override public List<KeyRange> getLocalKeyPartition() throws BackendException {
    CTConnection conn = null;
    IPartitioner partitioner = getCassandraPartitioner();

    if (!(partitioner instanceof AbstractByteOrderedPartitioner))
        throw new UnsupportedOperationException("getLocalKeyPartition() only supported by byte ordered partitioner.");

    Token.TokenFactory tokenFactory = partitioner.getTokenFactory();

    try {
        // Resist the temptation to describe SYSTEM_KS. It has no ring.
        // Instead, we'll create our own keyspace (or check that it exists), then describe it.
        ensureKeyspaceExists(keySpaceName);

        conn = pool.borrowObject(keySpaceName);
        List<TokenRange> ranges = conn.getClient().describe_ring(keySpaceName);
        List<KeyRange> keyRanges = new ArrayList<KeyRange>(ranges.size());

        for (TokenRange range : ranges) {
            // Keep only the token ranges with a replica on this machine.
            if (!NetworkUtil.hasLocalAddress(range.endpoints))
                continue;
            keyRanges.add(CassandraHelper.transformRange(tokenFactory.fromString(range.start_token), tokenFactory.fromString(range.end_token)));
        }

        return keyRanges;
    } catch (Exception e) {
        throw CassandraThriftKeyColumnValueStore.convertException(e);
    } finally {
        // NOTE(review): if borrowObject threw, conn is still null here — presumably
        // returnObjectUnsafe tolerates a null connection; confirm against the pool impl.
        pool.returnObjectUnsafe(keySpaceName, conn);
    }
}
/**
 * Builds an iterator over the keys in {@code [startKey, endKey]} for the given column slice,
 * paging {@code pageSize} keys at a time.
 *
 * The superclass is seeded with the two keys' tokens; the extra {@code getKeySlice} call
 * fetches the first page by key range rather than token range — see the inline comment for
 * why skipping it would drop {@code startKey}. Only a byte-ordered partitioner is accepted.
 *
 * @throws BackendException if fetching the first key slice fails
 */
public KeyRangeIterator(IPartitioner partitioner, SliceQuery columnSlice, int pageSize, ByteBuffer startKey, ByteBuffer endKey) throws BackendException {
    super(partitioner, columnSlice, pageSize, partitioner.getToken(startKey), partitioner.getToken(endKey), true);

    Preconditions.checkArgument(partitioner instanceof AbstractByteOrderedPartitioner);

    // Get first slice with key range instead of token range. Token
    // ranges are start-exclusive, key ranges are start-inclusive. Both
    // are end-inclusive. If we don't make the call below, then we will
    // erroneously miss startKey.
    List<KeySlice> ks = getKeySlice(startKey, endKey, columnSlice, pageSize);

    this.ksIter = checkFreshSlices(ks).iterator();
}
}
/**
 * Iterates over every key in the ring: the token range runs from the partitioner's minimum
 * token back to itself.
 *
 * NOTE(review): the semantics of the trailing {@code false} argument are defined by the
 * superclass (outside this excerpt) — presumably it distinguishes a token-seeded iteration
 * from the key-seeded one used by KeyRangeIterator; confirm against the superclass.
 */
public AllTokensIterator(IPartitioner partitioner, SliceQuery columnSlice, int pageSize) {
    super(partitioner, columnSlice, pageSize, partitioner.getMinimumToken(), partitioner.getMinimumToken(), false);
}
}
// NOTE(review): this span is a fragment of a larger method — its braces are unbalanced and it
// references `range` and `intersection`, which are declared outside the visible excerpt.
//
// Visible behavior: a key-based job range requires an order-preserving partitioner; a
// token-based range is parsed via the partitioner's TokenFactory; otherwise the describe_ring
// range itself is intersected (the intersection logic is out of view).
if (jobKeyRange != null) {
    if (jobKeyRange.start_key != null) {
        if (!partitioner.preservesOrder())
            throw new UnsupportedOperationException("KeyRange based on keys can only be used with a order preserving paritioner");
        // NOTE(review): "paritioner" above is a typo for "partitioner", but it is a runtime
        // exception message, so it is deliberately left untouched in this documentation pass.
        // NOTE(review): the nested ifs below only throw when BOTH start_token AND end_token
        // are set — this looks like it was meant to reject either one being set (an OR);
        // confirm against the upstream source.
        if (jobKeyRange.start_token != null)
            if (jobKeyRange.end_token != null)
                throw new IllegalArgumentException("only start_key supported");
        jobRange = new Range<>(partitioner.getToken(jobKeyRange.start_key), partitioner.getToken(jobKeyRange.end_key), partitioner);
    } else if (jobKeyRange.start_token != null) {
        jobRange = new Range<>(partitioner.getTokenFactory().fromString(jobKeyRange.start_token), partitioner.getTokenFactory().fromString(jobKeyRange.end_token), partitioner);
    } else {
        // Convert the Thrift token range into a DHT range, then write the (externally
        // computed) intersection back onto the Thrift range.
        Range<Token> dhtRange = new Range<Token>(partitioner.getTokenFactory().fromString(range.start_token), partitioner.getTokenFactory().fromString(range.end_token), partitioner);
        range.start_token = partitioner.getTokenFactory().toString(intersection.left);
        range.end_token = partitioner.getTokenFactory().toString(intersection.right);
public static int compareTo(IPartitioner partitioner, ByteBuffer key, PartitionPosition position) { // delegate to Token.KeyBound if needed if (!(position instanceof DecoratedKey)) return -position.compareTo(partitioner.decorateKey(key)); DecoratedKey otherKey = (DecoratedKey) position; int cmp = partitioner.getToken(key).compareTo(otherKey.getToken()); return cmp == 0 ? ByteBufferUtil.compareUnsigned(key, otherKey.getKey()) : cmp; }
/**
 * Maps a raw key buffer onto a {@link PartitionPosition}: a null or empty key becomes the
 * minimum token's lower key bound; anything else is decorated by the partitioner.
 */
public static PartitionPosition get(ByteBuffer key, IPartitioner p)
{
    if (key == null || key.remaining() == 0)
        return p.getMinimumToken().minKeyBound();
    return p.decorateKey(key);
}
}
/**
 * Constructs the CQL {@code token(...)} function for the given table: the function's return
 * type is the partitioner's token validator and its argument types are the table's
 * partition-key types (via {@code getKeyTypes}).
 */
public TokenFct(CFMetaData cfm) {
    super("token", cfm.partitioner.getTokenValidator(), getKeyTypes(cfm));
    this.cfm = cfm;
}
/**
 * Resolves the token restriction for the given bound.
 *
 * When the partition-key restrictions carry no bound on that side, the partitioner's minimum
 * token is used, which makes the range effectively unbounded in that direction.
 */
private Token getTokenBound(Bound b, QueryOptions options, IPartitioner p)
{
    if (!partitionKeyRestrictions.hasBound(b))
        return p.getMinimumToken();

    ByteBuffer raw = partitionKeyRestrictions.bounds(b, options).get(0);
    checkNotNull(raw, "Invalid null token value");
    return p.getTokenFactory().fromByteArray(raw);
}
/**
 * Normalizes a range endpoint: the minimum token doubles as the wrap-around marker, so it is
 * replaced by the partitioner's maximum token. We avoid calculating for wrap around ranges;
 * instead, when translating to PartitionPositions, tokens from .minKeyBound to .maxKeyBound
 * are included to make sure every token is covered.
 */
private Token token(Token t)
{
    if (t.equals(partitioner.getMinimumToken()))
        return partitioner.getMaximumToken();
    return t;
}
/**
 * Accumulates the ownership fraction of the token span {@code (current, next]} onto every
 * natural endpoint replicating that span, summing with any fraction already recorded.
 */
static void addOwnership(final TokenMetadata tokenMetadata, final AbstractReplicationStrategy rs, Token current, Token next, Map<InetAddress, Double> ownership)
{
    double span = current.size(next);
    // Any token inside the range resolves to the same replica set; use the midpoint.
    Token representative = current.getPartitioner().midpoint(current, next);
    for (InetAddress endpoint : rs.calculateNaturalEndpoints(representative, tokenMetadata))
        ownership.merge(endpoint, span, Double::sum);
}
/**
 * Draws {@code numTokens} distinct random tokens that no endpoint in the given metadata
 * currently owns. Loops until enough unowned, distinct tokens have been collected.
 */
public static Collection<Token> getRandomTokens(TokenMetadata metadata, int numTokens)
{
    Set<Token> picked = new HashSet<>(numTokens);
    while (picked.size() < numTokens)
    {
        Token candidate = metadata.partitioner.getRandomToken();
        // Only accept tokens with no owner; duplicates are absorbed by the Set.
        if (metadata.getEndpoint(candidate) == null)
            picked.add(candidate);
    }
    logger.info("Generated random tokens. tokens are {}", picked);
    return picked;
}
/**
 * Returns a list of disk boundaries; the result differs depending on whether vnodes are
 * enabled or not.
 *
 * The returned positions are UPPER bounds per data directory: everything from the
 * partitioner's minimum token up to getDiskBoundaries(..).get(0) belongs on the first disk,
 * everything between boundary 0 and boundary 1 on the second disk, and so on.
 *
 * The final entry in the returned list is always the partitioner's maximum token's upper key
 * bound, so the last disk covers the remainder of the ring.
 */
private static List<PartitionPosition> getDiskBoundaries(List<Range<Token>> sortedLocalRanges, IPartitioner partitioner, Directories.DataDirectory[] dataDirectories)
{
    assert partitioner.splitter().isPresent();
    Splitter splitter = partitioner.splitter().get();
    // With vnodes (num_tokens > 1) first try to assign whole ranges to disks.
    boolean dontSplitRanges = DatabaseDescriptor.getNumTokens() > 1;
    List<Token> boundaries = splitter.splitOwnedRanges(dataDirectories.length, sortedLocalRanges, dontSplitRanges);
    // If we can't split by ranges, split evenly to ensure utilisation of all disks
    if (dontSplitRanges && boundaries.size() < dataDirectories.length)
        boundaries = splitter.splitOwnedRanges(dataDirectories.length, sortedLocalRanges, false);
    List<PartitionPosition> diskBoundaries = new ArrayList<>();
    for (int i = 0; i < boundaries.size() - 1; i++)
        diskBoundaries.add(boundaries.get(i).maxKeyBound());
    // Last disk always extends to the maximum token so nothing is left uncovered.
    diskBoundaries.add(partitioner.getMaximumToken().maxKeyBound());
    return diskBoundaries;
}
}
public Map<InetAddress, Float> getOwnership() { List<Token> sortedTokens = tokenMetadata.sortedTokens(); // describeOwnership returns tokens in an unspecified order, let's re-order them Map<Token, Float> tokenMap = new TreeMap<Token, Float>(tokenMetadata.partitioner.describeOwnership(sortedTokens)); Map<InetAddress, Float> nodeMap = new LinkedHashMap<>(); for (Map.Entry<Token, Float> entry : tokenMap.entrySet()) { InetAddress endpoint = tokenMetadata.getEndpoint(entry.getKey()); Float tokenOwnership = entry.getValue(); if (nodeMap.containsKey(endpoint)) nodeMap.put(endpoint, nodeMap.get(endpoint) + tokenOwnership); else nodeMap.put(endpoint, tokenOwnership); } return nodeMap; }
// NOTE(review): fragment — the statement guarded by this condition lies outside the visible
// excerpt. Visible behavior: some action is taken only when the tree's partitioner does NOT
// preserve key order; confirm the consequent against the full source.
if (!tree.partitioner().preservesOrder())
/** Decorates the given raw key using this instance's partitioner. */
public DecoratedKey decorateKey(ByteBuffer key)
{
    return getPartitioner().decorateKey(key);
}
/** Returns the largest possible token of this instance's partitioner. */
public Token maxValue()
{
    return getPartitioner().getMaximumToken();
}
// NOTE(review): fragment — this span duplicates the interior of the jobKeyRange handling seen
// elsewhere in this file, but with its enclosing if/else-if structure stripped away: the two
// consecutive jobRange assignments and the trailing dhtRange/intersection statements belong
// to mutually exclusive branches in the original, and `range`/`intersection` are declared
// outside the visible excerpt. Left byte-identical; do not read this as straight-line code.
if (!partitioner.preservesOrder())
    throw new UnsupportedOperationException("KeyRange based on keys can only be used with a order preserving paritioner");
// NOTE(review): "paritioner" above is a typo in a runtime exception message — left untouched.
// NOTE(review): the nested ifs below only throw when BOTH tokens are set; likely intended as
// an OR — confirm against the upstream source.
if (jobKeyRange.start_token != null)
    if (jobKeyRange.end_token != null)
        throw new IllegalArgumentException("only start_key supported");
jobRange = new Range<>(partitioner.getToken(jobKeyRange.start_key), partitioner.getToken(jobKeyRange.end_key), partitioner);
jobRange = new Range<>(partitioner.getTokenFactory().fromString(jobKeyRange.start_token), partitioner.getTokenFactory().fromString(jobKeyRange.end_token), partitioner);
Range<Token> dhtRange = new Range<Token>(partitioner.getTokenFactory().fromString(range.start_token), partitioner.getTokenFactory().fromString(range.end_token), partitioner);
range.start_token = partitioner.getTokenFactory().toString(intersection.left);
range.end_token = partitioner.getTokenFactory().toString(intersection.right);
public static int compareTo(IPartitioner partitioner, ByteBuffer key, PartitionPosition position) { // delegate to Token.KeyBound if needed if (!(position instanceof DecoratedKey)) return -position.compareTo(partitioner.decorateKey(key)); DecoratedKey otherKey = (DecoratedKey) position; int cmp = partitioner.getToken(key).compareTo(otherKey.getToken()); return cmp == 0 ? ByteBufferUtil.compareUnsigned(key, otherKey.getKey()) : cmp; }
/**
 * Maps a raw key buffer onto a {@link RowPosition}: a null or empty key becomes the minimum
 * token's lower key bound; anything else is decorated by the partitioner.
 */
public static RowPosition get(ByteBuffer key, IPartitioner p)
{
    if (key == null || key.remaining() == 0)
        return p.getMinimumToken().minKeyBound();
    return p.decorateKey(key);
}
}
/**
 * Constructs the CQL {@code token(...)} function for the given table: the function's return
 * type is the partitioner's token validator and its argument types are the table's
 * partition-key types (via {@code getKeyTypes}).
 */
public TokenFct(CFMetaData cfm) {
    super("token", cfm.partitioner.getTokenValidator(), getKeyTypes(cfm));
    this.cfm = cfm;
}