/**
 * Repartitions the data (via a shuffle) so that each Spark partition is aligned with the
 * replica placement of the given {@code keyspaceName} and {@code tableName}. Invoking this
 * before {@code joinWithCassandraTable} makes the subsequent requests coordinator-local.
 * {@code partitionsPerHost} controls how many Spark partitions are created per Cassandra
 * host. Rows of the calling RDD must be convertible into the target table's partition key.
 */
public JavaRDD<T> repartitionByCassandraReplica(
        String keyspaceName,
        String tableName,
        int partitionsPerHost,
        ColumnSelector partitionkeyMapper,
        RowWriterFactory<T> rowWriterFactory) {
    // ClassTag of the element type, recovered from the wrapped RDD.
    ClassTag<T> elementTag = rdd.toJavaRDD().classTag();
    // Connector resolved from the ambient configuration.
    CassandraConnector conn = defaultConnector();
    // Delegate the actual replica-aware shuffle to the Scala-side functions.
    CassandraPartitionedRDD<T> partitioned =
        rddFunctions.repartitionByCassandraReplica(
            keyspaceName,
            tableName,
            partitionsPerHost,
            partitionkeyMapper,
            conn,
            elementTag,
            rowWriterFactory);
    return new JavaRDD<>(partitioned, elementTag);
}
/**
 * Shuffles this RDD so its partitioning mirrors the replica ownership of
 * {@code keyspaceName}.{@code tableName}. Calling this before
 * {@code joinWithCassandraTable} ensures join requests are coordinator-local.
 * {@code partitionsPerHost} sets the number of Spark partitions produced per host.
 * Each row of the calling RDD must map onto the table's partition key.
 */
public JavaRDD<T> repartitionByCassandraReplica(
        String keyspaceName,
        String tableName,
        int partitionsPerHost,
        ColumnSelector partitionkeyMapper,
        RowWriterFactory<T> rowWriterFactory) {
    final CassandraConnector cassandraConnector = defaultConnector();
    final ClassTag<T> tag = rdd.toJavaRDD().classTag();
    // All real work happens in the Scala RDD functions; this is a typed pass-through.
    final CassandraPartitionedRDD<T> replicaAligned =
        rddFunctions.repartitionByCassandraReplica(
            keyspaceName, tableName, partitionsPerHost,
            partitionkeyMapper, cassandraConnector, tag, rowWriterFactory);
    return new JavaRDD<>(replicaAligned, tag);
}
/**
 * Performs a shuffle that redistributes this RDD according to the replication of the
 * given {@code keyspaceName} and {@code tableName}, so that a following
 * {@code joinWithCassandraTable} issues coordinator-local requests.
 * {@code partitionsPerHost} determines how many Spark partitions each host receives.
 * The calling RDD's rows must be convertible into the table's partition key.
 */
public JavaRDD<T> repartitionByCassandraReplica(
        String keyspaceName,
        String tableName,
        int partitionsPerHost,
        ColumnSelector partitionkeyMapper,
        RowWriterFactory<T> rowWriterFactory) {
    ClassTag<T> rowTag = rdd.toJavaRDD().classTag();
    CassandraConnector connection = defaultConnector();
    // Forward to the Scala implementation, then re-wrap the result for the Java API.
    return new JavaRDD<>(
        rddFunctions.repartitionByCassandraReplica(
            keyspaceName,
            tableName,
            partitionsPerHost,
            partitionkeyMapper,
            connection,
            rowTag,
            rowWriterFactory),
        rowTag);
}
/**
 * Repartitions (with a shuffle) using the replica placement of
 * {@code keyspaceName}/{@code tableName} as the target layout, so that
 * {@code joinWithCassandraTable} performed afterwards is coordinator-local.
 * The number of Spark partitions created per host is {@code partitionsPerHost}.
 * Rows in the calling RDD must be convertible to the table's partition key.
 */
public JavaRDD<T> repartitionByCassandraReplica(
        String keyspaceName,
        String tableName,
        int partitionsPerHost,
        ColumnSelector partitionkeyMapper,
        RowWriterFactory<T> rowWriterFactory) {
    // Capture the element ClassTag once; it is needed both by the Scala call
    // and by the Java wrapper constructed below.
    final ClassTag<T> classTag = rdd.toJavaRDD().classTag();
    final CassandraPartitionedRDD<T> shuffled =
        rddFunctions.repartitionByCassandraReplica(
            keyspaceName,
            tableName,
            partitionsPerHost,
            partitionkeyMapper,
            defaultConnector(),
            classTag,
            rowWriterFactory);
    return new JavaRDD<>(shuffled, classTag);
}
/**
 * Redistributes the data via a shuffle based on the replication of the given
 * {@code keyspaceName} and {@code tableName}; use it ahead of
 * {@code joinWithCassandraTable} to keep requests coordinator-local.
 * {@code partitionsPerHost} fixes the number of Spark partitions created per host.
 * The calling RDD must contain rows convertible into the table's partition key.
 */
public JavaRDD<T> repartitionByCassandraReplica(
        String keyspaceName,
        String tableName,
        int partitionsPerHost,
        ColumnSelector partitionkeyMapper,
        RowWriterFactory<T> rowWriterFactory) {
    CassandraConnector replicaConnector = defaultConnector();
    ClassTag<T> itemTag = rdd.toJavaRDD().classTag();
    CassandraPartitionedRDD<T> result = rddFunctions.repartitionByCassandraReplica(
        keyspaceName, tableName, partitionsPerHost, partitionkeyMapper,
        replicaConnector, itemTag, rowWriterFactory);
    // Wrap the Scala RDD back into the Java-facing type.
    return new JavaRDD<>(result, itemTag);
}
// Fragment of a larger method (signature not visible in this chunk):
// sets up the common inputs for a Cassandra table read/join.
// ClassTag for the result row type R, derived from the reader factory's target class.
ClassTag<R> classTagR = JavaApiHelper.getClassTag(rowReaderFactory.targetClass());
// Connector for the read; presumably built from the current Spark conf — confirm defaultConnector().
CassandraConnector connector = defaultConnector();
// No explicit clustering order or row limit here; downstream code applies its defaults.
Option<ClusteringOrder> clusteringOrder = Option.empty();
Option<CassandraLimit> limit = Option.empty();
// Fragment of a larger method (signature not visible in this chunk):
// sets up the common inputs for a Cassandra table read/join.
// ClassTag for the result row type R, derived from the reader factory's target class.
ClassTag<R> classTagR = JavaApiHelper.getClassTag(rowReaderFactory.targetClass());
// Connector for the read; presumably built from the current Spark conf — confirm defaultConnector().
CassandraConnector connector = defaultConnector();
// No explicit clustering order or row limit here; downstream code applies its defaults.
Option<ClusteringOrder> clusteringOrder = Option.empty();
Option<CassandraLimit> limit = Option.empty();
// Fragment of a larger method (signature not visible in this chunk):
// sets up the common inputs for a Cassandra table read/join.
// ClassTag for the result row type R, derived from the reader factory's target class.
ClassTag<R> classTagR = JavaApiHelper.getClassTag(rowReaderFactory.targetClass());
// Connector for the read; presumably built from the current Spark conf — confirm defaultConnector().
CassandraConnector connector = defaultConnector();
Option<ClusteringOrder> clusteringOrder = Option.empty();
// NOTE(review): declared as Option<Object> here, but sibling snippets in this file use
// Option<CassandraLimit> for the same variable — verify against the consumer of `limit`
// whether this should be tightened to Option<CassandraLimit>.
Option<Object> limit = Option.empty();
// Fragment of a larger method (signature not visible in this chunk):
// sets up the common inputs for a Cassandra table read/join.
// ClassTag for the result row type R, derived from the reader factory's target class.
ClassTag<R> classTagR = JavaApiHelper.getClassTag(rowReaderFactory.targetClass());
// Connector for the read; presumably built from the current Spark conf — confirm defaultConnector().
CassandraConnector connector = defaultConnector();
Option<ClusteringOrder> clusteringOrder = Option.empty();
// NOTE(review): declared as Option<Object> here, but sibling snippets in this file use
// Option<CassandraLimit> for the same variable — verify against the consumer of `limit`
// whether this should be tightened to Option<CassandraLimit>.
Option<Object> limit = Option.empty();
// Fragment of a larger method (signature not visible in this chunk):
// sets up the common inputs for a Cassandra table read/join.
// ClassTag for the result row type R, derived from the reader factory's target class.
ClassTag<R> classTagR = JavaApiHelper.getClassTag(rowReaderFactory.targetClass());
// Connector for the read; presumably built from the current Spark conf — confirm defaultConnector().
CassandraConnector connector = defaultConnector();
// No explicit clustering order or row limit here; downstream code applies its defaults.
Option<ClusteringOrder> clusteringOrder = Option.empty();
Option<CassandraLimit> limit = Option.empty();