public void submitBatch() {
    if (metricNamesBatch.size() != 0) {
        m_clusterConnection.executeAsync(metricNamesBatch);
        m_batchStats.addNameBatch(metricNamesBatch.size());
    }

    if (rowKeyBatch.size() != 0) {
        //rowKeyBatch.enableTracing();
        m_clusterConnection.executeAsync(rowKeyBatch);
        m_batchStats.addRowKeyBatch(rowKeyBatch.size());
    }

    for (BatchStatement batchStatement : m_batchMap.values()) {
        //batchStatement.enableTracing();
        if (batchStatement.size() != 0) {
            m_clusterConnection.execute(batchStatement);
            //System.out.println(resultSet.getExecutionInfo().getQueryTrace().getTraceId());
            m_batchStats.addDatapointsBatch(batchStatement.size());
        }
    }

    //Catch-all in case of a load balancing problem
    if (dataPointBatch.size() != 0) {
        m_clusterConnection.execute(dataPointBatch);
        m_batchStats.addDatapointsBatch(dataPointBatch.size());
    }
}
if (batch.size() > 0) {
    ses = session();
    ses.execute(tuneStatementExecutionOptions(batch));
}
@Override
public boolean hasChanges() {
    return this.batch.size() > 0;
}
if (batchStatement.size() != 0) {
    connectionSession.execute(batchStatement);
    batchStatement.clear();
}
@Test(groups = "short") @CassandraVersion( value = "2.0.9", description = "This will only work with C* 2.0.9 (CASSANDRA-7337)") public void casBatchTest() { PreparedStatement st = session().prepare("INSERT INTO test (k, v) VALUES (?, ?) IF NOT EXISTS"); BatchStatement batch = new BatchStatement(); batch.add(new SimpleStatement("INSERT INTO test (k, v) VALUES (?, ?)", "key1", 0)); batch.add(st.bind("key1", 1)); batch.add(st.bind("key1", 2)); assertEquals(3, batch.size()); ResultSet rs = session().execute(batch); Row r = rs.one(); assertTrue(!r.isNull("[applied]")); assertEquals(r.getBool("[applied]"), true); rs = session().execute(batch); r = rs.one(); assertTrue(!r.isNull("[applied]")); assertEquals(r.getBool("[applied]"), false); } }
@Test(groups = "short") public void simpleBatchTest() { try { PreparedStatement st = session().prepare("INSERT INTO test (k, v) VALUES (?, ?)"); BatchStatement batch = new BatchStatement(); batch.add(new SimpleStatement("INSERT INTO test (k, v) VALUES (?, ?)", "key1", 0)); batch.add(st.bind("key1", 1)); batch.add(st.bind("key2", 0)); assertEquals(3, batch.size()); session().execute(batch); ResultSet rs = session().execute("SELECT * FROM test"); Row r; r = rs.one(); assertEquals(r.getString("k"), "key1"); assertEquals(r.getInt("v"), 0); r = rs.one(); assertEquals(r.getString("k"), "key1"); assertEquals(r.getInt("v"), 1); r = rs.one(); assertEquals(r.getString("k"), "key2"); assertEquals(r.getInt("v"), 0); assertTrue(rs.isExhausted()); } catch (UnsupportedFeatureException e) { // This is expected when testing the protocol v1 assertEquals( cluster().getConfiguration().getProtocolOptions().getProtocolVersion(), ProtocolVersion.V1); } }
public void load(Iterator<List<Object>> rows) {
    PreparedStatement statement = session.prepare(insertQuery);
    BatchStatement batch = createBatchStatement();
    while (rows.hasNext()) {
        if (batch.size() >= batchRowsCount) {
            session.execute(batch);
            batch = createBatchStatement();
        }
        List<Object> row = rows.next();
        checkState(row.size() == columnsCount,
                "values count in a row is expected to be %d, but found: %d",
                columnsCount, row.size());
        batch.add(statement.bind(row.toArray()));
    }
    if (batch.size() > 0) {
        session.execute(batch);
    }
}
private void insertStatement(final GeoWaveRow row, final BoundStatement statement) {
    if (ASYNC) {
        if (batchSize > 1) {
            final BatchStatement currentBatch = addStatement(row, statement);
            synchronized (currentBatch) {
                if (currentBatch.size() >= batchSize) {
                    writeBatch(currentBatch);
                }
            }
        } else {
            try {
                executeAsync(statement);
            } catch (final InterruptedException e) {
                LOGGER.warn("async write semaphore interrupted", e);
                writeSemaphore.release();
            }
        }
    } else {
        session.execute(statement);
    }
}
@Override
public Observable<Integer> insertStringData(Metric<String> metric, int ttl, int maxSize) {
    return Observable.from(metric.getDataPoints())
            .compose(mapStringDatapoint(metric, ttl, maxSize))
            .compose(new BatchStatementTransformer())
            .flatMap(batch -> rxSession.execute(batch).map(resultSet -> batch.size()));
}
@Override
public <T> Observable<Integer> updateMetricsIndex(Observable<Metric<T>> metrics) {
    return metrics.map(Metric::getMetricId)
            .map(id -> updateMetricsIndex.bind(id.getTenantId(), id.getType().getCode(), id.getName()))
            .compose(new BatchStatementTransformer())
            .flatMap(batch -> rxSession.execute(batch).map(resultSet -> batch.size()));
}
private Observable.Transformer<BoundStatement, Integer> applyMicroBatching() {
    return tObservable -> tObservable
            .groupBy(b -> {
                ByteBuffer routingKey = b.getRoutingKey(ProtocolVersion.NEWEST_SUPPORTED, codecRegistry);
                Token token = metadata.newToken(routingKey);
                for (TokenRange tokenRange : session.getCluster().getMetadata().getTokenRanges()) {
                    if (tokenRange.contains(token)) {
                        return tokenRange;
                    }
                }
                log.warn("Unable to find any Cassandra node to insert token " + token.toString());
                return session.getCluster().getMetadata().getTokenRanges().iterator().next();
            })
            .flatMap(g -> g.compose(new BoundBatchStatementTransformer()))
            .flatMap(batch -> rxSession
                    .execute(batch)
                    .compose(applyInsertRetryPolicy())
                    .map(resultSet -> batch.size()));
}
public void store(TSDRMetricRecord mr) {
    //create metric key
    String tsdrKey = FormatUtil.getTSDRMetricKey(mr);
    TSDRCacheEntry cacheEntry = cache.getCacheEntry(tsdrKey);
    //if it does not exist, create it
    if (cacheEntry == null) {
        cacheEntry = cache.addTSDRCacheEntry(tsdrKey);
    }
    RegularStatement st = QueryBuilder.insertInto("tsdr", "MetricVal")
            .value("KeyA", cacheEntry.getMd5ID().getMd5Long1())
            .value("KeyB", cacheEntry.getMd5ID().getMd5Long2())
            .value("Time", mr.getTimeStamp())
            .value("value", mr.getMetricValue().doubleValue());
    this.batch.add(st);
    if (this.batch.size() >= MAX_BATCH_SIZE) {
        this.executeBatch();
        this.startBatch();
    }
}
public void store(TSDRLogRecord lr) {
    //create log key
    String tsdrKey = FormatUtil.getTSDRLogKey(lr);
    TSDRCacheEntry cacheEntry = cache.getCacheEntry(tsdrKey);
    //if it does not exist, create it
    if (cacheEntry == null) {
        cacheEntry = cache.addTSDRCacheEntry(tsdrKey);
    }
    RegularStatement st = QueryBuilder.insertInto("tsdr", "MetricLog")
            .value("KeyA", cacheEntry.getMd5ID().getMd5Long1())
            .value("KeyB", cacheEntry.getMd5ID().getMd5Long2())
            .value("Time", lr.getTimeStamp())
            .value("xIndex", lr.getIndex())
            .value("value", lr.getRecordFullText());
    this.batch.add(st);
    if (this.batch.size() >= MAX_BATCH_SIZE) {
        this.executeBatch();
        this.startBatch();
    }
}
public void store(TSDRBinaryRecord lr) {
    //create binary key
    String tsdrKey = FormatUtil.getTSDRBinaryKey(lr);
    TSDRCacheEntry cacheEntry = cache.getCacheEntry(tsdrKey);
    //if it does not exist, create it
    if (cacheEntry == null) {
        cacheEntry = cache.addTSDRCacheEntry(tsdrKey);
    }
    RegularStatement st = QueryBuilder.insertInto("tsdr", "MetricBlob")
            .value("KeyA", cacheEntry.getMd5ID().getMd5Long1())
            .value("KeyB", cacheEntry.getMd5ID().getMd5Long2())
            .value("Time", lr.getTimeStamp())
            .value("xIndex", lr.getIndex())
            .value("value", lr.getData());
    this.batch.add(st);
    if (this.batch.size() >= MAX_BATCH_SIZE) {
        this.executeBatch();
        this.startBatch();
    }
}