/**
 * Builds the CQL {@code token} function for the given table. The function's return
 * type is the partitioner's token validator, and its argument types are the table's
 * partition key column types.
 *
 * @param cfm the metadata of the table the function is created for.
 */
public TokenFct(CFMetaData cfm)
{
    super("token", cfm.partitioner.getTokenValidator(), getKeyTypes(cfm));
    this.cfm = cfm;
}
/**
 * Builds the CQL {@code token} function for the given table. The function's return
 * type is the partitioner's token validator, and its argument types are the table's
 * partition key column types.
 *
 * @param cfm the metadata of the table the function is created for.
 */
public TokenFct(CFMetaData cfm)
{
    super("token", cfm.partitioner.getTokenValidator(), getKeyTypes(cfm));
    this.cfm = cfm;
}
/**
 * Builds the CQL {@code token} function for the given table. The function's return
 * type is the partitioner's token validator, and its argument types are the table's
 * partition key column types.
 *
 * @param cfm the metadata of the table the function is created for.
 */
public TokenFct(CFMetaData cfm)
{
    super("token", cfm.partitioner.getTokenValidator(), getKeyTypes(cfm));
    this.cfm = cfm;
}
/**
 * Builds the CQL {@code token} function for the given table.
 * <p>
 * NOTE(review): unlike the sibling versions of this constructor, this one reads a bare
 * {@code partitioner} rather than {@code cfm.partitioner} -- presumably a static/field
 * partitioner in this class. Confirm it matches the table's actual partitioner.
 *
 * @param cfm the metadata of the table the function is created for.
 */
public TokenFct(CFMetaData cfm)
{
    super("token", partitioner.getTokenValidator(), getKeyTypes(cfm));
    this.cfm = cfm;
}
/**
 * Iterator over the rows of a multi-range split. Resolves the token validator from the
 * partitioner and positions the cursor on the first of the split's token ranges.
 */
public MultiRangeRowIterator()
{
    MultiRangeSplit multiRangeSplit = (MultiRangeSplit) split;

    if (session == null)
        throw new RuntimeException("Can't create connection session");

    validatorType = partitioner.getTokenValidator();

    if (logger.isDebugEnabled())
    {
        logger.debug("QUERY: " + cqlQuery);
        logger.debug("Multi Range length is " + multiRangeSplit.getLength());
    }
    logger.info("Created new MultiRangeRowIterator");

    tokenRanges = multiRangeSplit.getTokenRanges();
    currentRange = 0;
}
/**
 * Gets the tokens owned by each cluster machine.<br/>
 * The concrete class of the tokens depends on the partitioner used in the cluster.<br/>
 *
 * @param query the query to execute against the given session to obtain the list of tokens.
 * @param sessionWithHost pair holding the open session and the name of the machine it is connected to.
 * @param partitioner the partitioner used in the cluster.
 * @return a map from each cluster machine to its tokens; tokens are not returned in any
 *         particular order.
 */
static Map<String, Iterable<Comparable>> fetchTokens(String query, final Pair<Session, String> sessionWithHost,
        IPartitioner partitioner)
{
    ResultSet resultSet = sessionWithHost.left.execute(query);
    final AbstractType tokenValidator = partitioner.getTokenValidator();

    // Lazily map each result row to a (host, tokens) pair, then materialize into the map.
    Iterable<Pair<String, Iterable<Comparable>>> hostTokenPairs =
            transform(resultSet.all(), new FetchTokensRowPairFunction(sessionWithHost, tokenValidator));

    final Map<String, Iterable<Comparable>> tokensByHost = Maps.newHashMap();
    for (Pair<String, Iterable<Comparable>> hostTokens : hostTokenPairs)
    {
        tokensByHost.put(hostTokens.left, hostTokens.right);
    }
    return tokensByHost;
}
/**
 * Given a token, fetches the list of replica machines holding that token.
 *
 * @param token the token whose replicas we want to fetch.
 * @param session the connection to the cluster.
 * @param partitioner the partitioner used in the cluster.
 * @return the host names of the replica machines holding that token.
 */
private static List<String> initReplicas(
        final Comparable token, final Session session, final IPartitioner partitioner)
{
    final Metadata metadata = session.getCluster().getMetadata();

    // Fix: the no-arg String.getBytes() uses the platform-default charset, so the same
    // token string could yield different bytes (and different replicas) on different
    // client JVMs; pin it to UTF-8.
    // NOTE(review): passing the token's *string* bytes rather than a partitioner-decomposed
    // token looks suspicious -- confirm this is the key form Metadata.getReplicas expects.
    @SuppressWarnings("unchecked")
    Set<Host> replicas = metadata.getReplicas(quote(session.getLoggedKeyspace()),
            ByteBuffer.wrap(token.toString().getBytes(java.nio.charset.StandardCharsets.UTF_8)));

    return Lists.newArrayList(Iterables.transform(replicas, new Function<Host, String>()
    {
        @Nullable
        @Override
        public String apply(@Nullable Host input)
        {
            assert input != null;
            // NOTE(review): getHostName() may trigger a reverse DNS lookup per replica.
            return input.getAddress().getHostName();
        }
    }));
}
/**
 * Builds a new {@link ThriftRangeUtils}.
 *
 * @param partitioner the partitioner used in the cluster.
 * @param host the host address.
 * @param rpcPort the host RPC port.
 * @param keyspace the keyspace name.
 * @param columnFamily the column family name.
 * @param splitSize the number of rows per split.
 */
public ThriftRangeUtils(IPartitioner partitioner, String host, int rpcPort, String keyspace,
        String columnFamily, int splitSize)
{
    this.host = host;
    this.rpcPort = rpcPort;
    this.keyspace = keyspace;
    this.columnFamily = columnFamily;
    this.splitSize = splitSize;

    // Everything token-related is derived from the cluster's partitioner.
    tokenType = partitioner.getTokenValidator();
    tokenFactory = partitioner.getTokenFactory();
    minToken = (Comparable) partitioner.getMinimumToken().token;
}
/**
 * Iterator over the rows of a single token-range split: binds the split's start and end
 * tokens into the range query and remembers which columns form the partition key.
 */
public SingleRangeRowIterator()
{
    ColumnFamilySplit rangeSplit = (ColumnFamilySplit) split;

    if (session == null)
        throw new RuntimeException("Can't create connection session");

    AbstractType tokenType = partitioner.getTokenValidator();

    if (logger.isDebugEnabled())
    {
        logger.debug("QUERY: " + cqlQuery);
        logger.debug("START: " + rangeSplit.getStartToken());
        logger.debug("END: " + rangeSplit.getEndToken());
    }

    // The split boundaries arrive as strings; round-trip them through the validator
    // so they are bound with the partitioner's native token type.
    ResultSet rs = session.execute(cqlQuery,
            tokenType.compose(tokenType.fromString(rangeSplit.getStartToken())),
            tokenType.compose(tokenType.fromString(rangeSplit.getEndToken())));

    for (ColumnMetadata meta : cluster.getMetadata().getKeyspace(keyspace).getTable(cfName).getPartitionKey())
        partitionBoundColumns.put(meta.getName(), Boolean.TRUE);

    rows = rs.iterator();
}
firstColumn.cfName, new ColumnIdentifier("partition key token", true), cfm.partitioner.getTokenValidator()));
firstColumn.cfName, new ColumnIdentifier("partition key token", true), cfm.partitioner.getTokenValidator()));
/**
 * Row iterator over one token range: binds the split's start and end tokens into the
 * prepared range query and records the table's partition key columns.
 */
public RowIterator()
{
    AbstractType tokenType = partitioner.getTokenValidator();

    Object startToken = tokenType.compose(tokenType.fromString(split.getStartToken()));
    Object endToken = tokenType.compose(tokenType.fromString(split.getEndToken()));
    ResultSet rs = session.execute(cqlQuery, startToken, endToken);

    for (ColumnMetadata meta : cluster.getMetadata().getKeyspace(quote(keyspace)).getTable(quote(cfName)).getPartitionKey())
        partitionBoundColumns.put(meta.getName(), Boolean.TRUE);

    rows = rs.iterator();
}
/**
 * Row iterator over one token range: binds the split's start and end tokens into the
 * prepared range query and records the table's partition key columns.
 */
public RowIterator()
{
    AbstractType tokenType = partitioner.getTokenValidator();

    Object startToken = tokenType.compose(tokenType.fromString(split.getStartToken()));
    Object endToken = tokenType.compose(tokenType.fromString(split.getEndToken()));
    ResultSet rs = session.execute(cqlQuery, startToken, endToken);

    for (ColumnMetadata meta : cluster.getMetadata().getKeyspace(quote(keyspace)).getTable(quote(cfName)).getPartitionKey())
        partitionBoundColumns.put(meta.getName(), Boolean.TRUE);

    rows = rs.iterator();
}
/**
 * Row iterator over one token range: binds the split's start and end tokens into the
 * prepared range query and records the table's partition key columns.
 */
public RowIterator()
{
    AbstractType tokenType = partitioner.getTokenValidator();

    Object startToken = tokenType.compose(tokenType.fromString(split.getStartToken()));
    Object endToken = tokenType.compose(tokenType.fromString(split.getEndToken()));
    ResultSet rs = session.execute(cqlQuery, startToken, endToken);

    for (ColumnMetadata meta : cluster.getMetadata().getKeyspace(quote(keyspace)).getTable(quote(cfName)).getPartitionKey())
        partitionBoundColumns.put(meta.getName(), Boolean.TRUE);

    rows = rs.iterator();
}
/**
 * Row iterator over one token range: binds the split's start and end tokens into the
 * prepared range query and records the table's partition key columns.
 */
public RowIterator()
{
    AbstractType tokenType = partitioner.getTokenValidator();

    Object startToken = tokenType.compose(tokenType.fromString(split.getStartToken()));
    Object endToken = tokenType.compose(tokenType.fromString(split.getEndToken()));
    ResultSet rs = session.execute(cqlQuery, startToken, endToken);

    for (ColumnMetadata meta : cluster.getMetadata().getKeyspace(quote(keyspace)).getTable(quote(cfName)).getPartitionKey())
        partitionBoundColumns.put(meta.getName(), Boolean.TRUE);

    rows = rs.iterator();
}
/**
 * Iterator over the distinct partition keys within one token range. The split's string
 * boundaries are converted to native token values before being bound into the query.
 */
DistinctKeyIterator()
{
    AbstractType tokenType = partitioner.getTokenValidator();

    SimpleStatement statement = new SimpleStatement(cqlQuery,
            tokenType.compose(tokenType.fromString(split.getStartToken())),
            tokenType.compose(tokenType.fromString(split.getEndToken())));
    rowIterator = session.execute(statement).iterator();

    for (ColumnMetadata meta : cluster.getMetadata().getKeyspace(quote(keyspace)).getTable(quote(cfName)).getPartitionKey())
    {
        partitionBoundColumns.put(meta.getName(), Boolean.TRUE);
    }
}
/**
 * Iterator over the distinct partition keys within one token range. The split's string
 * boundaries are converted to native token values before being bound into the query.
 */
DistinctKeyIterator()
{
    AbstractType tokenType = partitioner.getTokenValidator();

    SimpleStatement statement = new SimpleStatement(cqlQuery,
            tokenType.compose(tokenType.fromString(split.getStartToken())),
            tokenType.compose(tokenType.fromString(split.getEndToken())));
    rowIterator = session.execute(statement).iterator();

    for (ColumnMetadata meta : cluster.getMetadata().getKeyspace(quote(keyspace)).getTable(quote(cfName)).getPartitionKey())
    {
        partitionBoundColumns.put(meta.getName(), Boolean.TRUE);
    }
}
/**
 * Iterator over the distinct partition keys within one token range. The split's string
 * boundaries are converted to native token values before being bound into the query.
 */
DistinctKeyIterator()
{
    AbstractType tokenType = partitioner.getTokenValidator();

    SimpleStatement statement = new SimpleStatement(cqlQuery,
            tokenType.compose(tokenType.fromString(split.getStartToken())),
            tokenType.compose(tokenType.fromString(split.getEndToken())));
    rowIterator = session.execute(statement).iterator();

    for (ColumnMetadata meta : cluster.getMetadata().getKeyspace(quote(keyspace)).getTable(quote(cfName)).getPartitionKey())
    {
        partitionBoundColumns.put(meta.getName(), Boolean.TRUE);
    }
}
firstColumn.cfName, new ColumnIdentifier("partition key token", true), cfm.partitioner.getTokenValidator()));
/**
 * Recursively splits a token range into {@code bisectFactor} sub-ranges by repeatedly
 * taking the partitioner's midpoint of each half.
 * <p>
 * NOTE(review): the factor is halved at each recursion level and the recursion stops when
 * {@code bisectFactor / 2 <= 1}, so the factor is presumably expected to be a power of
 * two -- confirm with callers.
 *
 * @param range the token range to be split.
 * @param partitioner the Cassandra partitioner, used to compute midpoints.
 * @param bisectFactor the number of pieces the original token range will be split into.
 * @param accumulator collects the resulting sub-ranges.
 */
private static void bisectTokeRange(
        DeepTokenRange range, final IPartitioner partitioner, final int bisectFactor,
        final List<DeepTokenRange> accumulator)
{
    final AbstractType tokenType = partitioner.getTokenValidator();

    // Convert the range boundaries into the partitioner's native Token representation.
    Token leftToken = partitioner.getTokenFactory().fromByteArray(tokenType.decompose(range.getStartToken()));
    Token rightToken = partitioner.getTokenFactory().fromByteArray(tokenType.decompose(range.getEndToken()));
    Token midToken = partitioner.midpoint(leftToken, rightToken);

    // Round-trip the midpoint through the validator to get it back as a Comparable.
    Comparable midpoint = (Comparable) tokenType.compose(tokenType.fromString(midToken.toString()));

    DeepTokenRange lowerHalf = new DeepTokenRange(range.getStartToken(), midpoint, range.getReplicas());
    DeepTokenRange upperHalf = new DeepTokenRange(midpoint, range.getEndToken(), range.getReplicas());

    if (bisectFactor / 2 <= 1)
    {
        accumulator.add(lowerHalf);
        accumulator.add(upperHalf);
    }
    else
    {
        bisectTokeRange(lowerHalf, partitioner, bisectFactor / 2, accumulator);
        bisectTokeRange(upperHalf, partitioner, bisectFactor / 2, accumulator);
    }
}