public void flush() { // BUG(1795): We should force batcher to issue RPC call for buffered messages, // so the code below doesn't wait uselessly. ArrayList<ApiFuture<Void>> writesToFlush = new ArrayList<>(); synchronized (writeLock) { writesToFlush.addAll(pendingWrites); } try { ApiFutures.allAsList(writesToFlush).get(FLUSH_WAIT_TIMEOUT_SECONDS, TimeUnit.SECONDS); } catch (InterruptedException | ExecutionException | TimeoutException e) { throw new RuntimeException(e); } }
// Block until every publish future has resolved; the resulting list holds the
// server-assigned message IDs in the same order as messageIdFutures.
List<String> messageIds = ApiFutures.allAsList(messageIdFutures).get();
/**
 * Deletes every row whose key starts with {@code rowPrefix} in {@code tableId},
 * waiting up to ten minutes for all delete mutations to finish.
 */
private void deleteRows() throws InterruptedException, ExecutionException, TimeoutException {
  Query prefixQuery = Query.create(tableId).prefix(rowPrefix);
  List<ApiFuture<Void>> deleteFutures = Lists.newArrayList();
  // Issue one async delete per matching row.
  for (Row row : dataClient.readRows(prefixQuery)) {
    deleteFutures.add(
        dataClient.mutateRowAsync(RowMutation.create(tableId, row.getKey()).deleteRow()));
  }
  // A failure or timeout on any delete propagates to the caller.
  ApiFutures.allAsList(deleteFutures).get(10, TimeUnit.MINUTES);
}
/** Verifies that {@code getAll} returns snapshots in the requested order with their data. */
@Test
public void getAll() throws Exception {
  DocumentReference firstDoc = randomColl.document("doc1");
  DocumentReference secondDoc = randomColl.document("doc2");
  // Populate both documents and wait for the writes to land before reading.
  ApiFutures.allAsList(
          ImmutableList.of(firstDoc.set(SINGLE_FIELD_MAP), secondDoc.set(SINGLE_FIELD_MAP)))
      .get();
  List<DocumentSnapshot> snapshots = firestore.getAll(firstDoc, secondDoc).get();
  assertEquals(2, snapshots.size());
  assertEquals("doc1", snapshots.get(0).getId());
  assertEquals(SINGLE_FIELD_OBJECT, snapshots.get(0).toObject(SingleField.class));
  assertEquals("doc2", snapshots.get(1).getId());
  assertEquals(SINGLE_FIELD_OBJECT, snapshots.get(1).toObject(SingleField.class));
}
@Test public void elementCountTest() { // First request RpcExpectation rpcExpectation1 = RpcExpectation.create(); int i = 0; for (; i < FLUSH_COUNT; i++) { rpcExpectation1.addEntry("key" + i, Code.OK); } service.expectations.add(rpcExpectation1); // Overflow request RpcExpectation rpcExpectation2 = RpcExpectation.create().addEntry("key" + i, Code.OK); service.expectations.add(rpcExpectation2); List<ApiFuture<Void>> results = Lists.newArrayList(); for (int j = 0; j < FLUSH_COUNT + 1; j++) { ApiFuture<Void> result = bulkMutations.add(RowMutation.create(TABLE_ID, "key" + j)); results.add(result); } verifyOk(ApiFutures.allAsList(results)); service.verifyOk(); }
@Test public void test() throws InterruptedException, ExecutionException, TimeoutException { BigtableDataClient client = testEnvRule.env().getDataClient(); // Create some data so that sample row keys has something to show List<ApiFuture<?>> futures = Lists.newArrayList(); for (int i = 0; i < 10; i++) { ApiFuture<Void> future = client.mutateRowAsync( RowMutation.create( testEnvRule.env().getTableId(), testEnvRule.env().getRowPrefix() + "-" + i) .setCell(testEnvRule.env().getFamilyId(), "", "value")); futures.add(future); } ApiFutures.allAsList(futures).get(1, TimeUnit.MINUTES); ApiFuture<List<KeyOffset>> future = client.sampleRowKeysAsync(testEnvRule.env().getTableId()); List<KeyOffset> results = future.get(1, TimeUnit.MINUTES); assertThat(results).isNotEmpty(); assertThat(results.get(results.size() - 1).getOffsetBytes()).isGreaterThan(0L); } }
/** Create cities collection and add sample documents. */ void prepareExamples() throws Exception { // [START fs_retrieve_create_examples] CollectionReference cities = db.collection("cities"); List<ApiFuture<WriteResult>> futures = new ArrayList<>(); futures.add(cities.document("SF").set(new City("San Francisco", "CA", "USA", false, 860000L, Arrays.asList("west_coast", "norcal")))); futures.add(cities.document("LA").set(new City("Los Angeles", "CA", "USA", false, 3900000L, Arrays.asList("west_coast", "socal")))); futures.add(cities.document("DC").set(new City("Washington D.C.", null, "USA", true, 680000L, Arrays.asList("east_coast")))); futures.add(cities.document("TOK").set(new City("Tokyo", null, "Japan", true, 9000000L, Arrays.asList("kanto", "honshu")))); futures.add(cities.document("BJ").set(new City("Beijing", null, "China", true, 21500000L, Arrays.asList("jingjinji", "hebei")))); // (optional) block on operation ApiFutures.allAsList(futures).get(); // [END fs_retrieve_create_examples] }
/** * Creates cities collection and add sample documents to test queries. * * @return collection reference */ void prepareExamples() throws Exception { // [START fs_query_create_examples] CollectionReference cities = db.collection("cities"); List<ApiFuture<WriteResult>> futures = new ArrayList<>(); futures.add(cities.document("SF").set(new City("San Francisco", "CA", "USA", false, 860000L, Arrays.asList("west_coast", "norcal")))); futures.add(cities.document("LA").set(new City("Los Angeles", "CA", "USA", false, 3900000L, Arrays.asList("west_coast", "socal")))); futures.add(cities.document("DC").set(new City("Washington D.C.", null, "USA", true, 680000L, Arrays.asList("east_coast")))); futures.add(cities.document("TOK").set(new City("Tokyo", null, "Japan", true, 9000000L, Arrays.asList("kanto", "honshu")))); futures.add(cities.document("BJ").set(new City("Beijing", null, "China", true, 21500000L, Arrays.asList("jingjinji", "hebei")))); // (optional) block on documents successfully added ApiFutures.allAsList(futures).get(); // [END fs_query_create_examples] }
// Block until every future in the batch has resolved; the resulting list holds
// the server-assigned message IDs in the same order as futures.
List<String> messageIds = ApiFutures.allAsList(futures).get();
public void flush() { // BUG(1795): We should force batcher to issue RPC call for buffered messages, // so the code below doesn't wait uselessly. ArrayList<ApiFuture<Void>> writesToFlush = new ArrayList<>(); synchronized (writeLock) { writesToFlush.addAll(pendingWrites); } try { ApiFutures.allAsList(writesToFlush).get(FLUSH_WAIT_TIMEOUT_SECONDS, TimeUnit.SECONDS); } catch (InterruptedException | ExecutionException | TimeoutException e) { throw new RuntimeException(e); } }
@Override public void flush(Map<TopicPartition, OffsetAndMetadata> partitionOffsets) { log.debug("Flushing..."); // Process results of all the outstanding futures specified by each TopicPartition. for (Map.Entry<TopicPartition, OffsetAndMetadata> partitionOffset : partitionOffsets.entrySet()) { log.trace("Received flush for partition " + partitionOffset.getKey().toString()); Map<Integer, OutstandingFuturesForPartition> outstandingFuturesForTopic = allOutstandingFutures.get(partitionOffset.getKey().topic()); if (outstandingFuturesForTopic == null) { continue; } OutstandingFuturesForPartition outstandingFutures = outstandingFuturesForTopic.get(partitionOffset.getKey().partition()); if (outstandingFutures == null) { continue; } try { ApiFutures.allAsList(outstandingFutures.futures).get(); } catch (Exception e) { throw new RuntimeException(e); } finally { outstandingFutures.futures.clear(); } } allOutstandingFutures.clear(); }