@Override public void start() throws Exception { JsonObject config = new JsonObject().put("url", "jdbc:hsqldb:mem:test?shutdown=true") .put("driver_class", "org.hsqldb.jdbcDriver"); JDBCClient jdbc = JDBCClient.createShared(vertx, config); // Connect to the database jdbc.rxGetConnection().flatMap(conn -> { // Now chain some statements using flatmap composition Single<ResultSet> resa = conn.rxUpdate("CREATE TABLE test(col VARCHAR(20))") .flatMap(result -> conn.rxUpdate("INSERT INTO test (col) VALUES ('val1')")) .flatMap(result -> conn.rxUpdate("INSERT INTO test (col) VALUES ('val2')")) .flatMap(result -> conn.rxQuery("SELECT * FROM test")); return resa.doAfterTerminate(conn::close); }).subscribe(resultSet -> { // Subscribe to the final result System.out.println("Results : " + resultSet.getRows()); }, err -> { System.out.println("Database problem"); err.printStackTrace(); }); } }
}).doAfterTerminate(() -> operationDuration.stop() );
.doAfterTerminate(conn::close) ).subscribe(resultSet -> {
}); }).doAfterTerminate(() -> operationDuration.stop() );
subscriber.onError(e); }), scheduler).doAfterTerminate(() -> operationDuration.stop() );
throw sneakyThrow(new EVCacheException("Exception getting data for APP " + _appName + ", key = " + canonicalKey, ex)); }).doAfterTerminate(() -> { op.stop(); if (log.isDebugEnabled() && shouldLog()) log.debug("GET : APP " + _appName + ", Took " + op.getDuration() + " milliSec.");
public Single<T> get(long duration, TimeUnit units, boolean throwException, boolean hasZF, Scheduler scheduler) { return observe().timeout(duration, units, Single.create(subscriber -> { // whenever timeout occurs, continuous timeout counter will increase by 1. MemcachedConnection.opTimedOut(op); if (op != null) op.timeOut(); if (!hasZF) EVCacheMetricsFactory.getCounter(appName, null, serverGroup.getName(), appName + "-get-CheckedOperationTimeout", DataSourceType.COUNTER).increment(); if (throwException) { subscriber.onError(new CheckedOperationTimeoutException("Timed out waiting for operation", op)); } else { if (isCancelled()) { if (hasZF) EVCacheMetricsFactory.getCounter(appName, null, serverGroup.getName(), appName + "-get-Cancelled", DataSourceType.COUNTER).increment(); } subscriber.onSuccess(objRef.get()); } }), scheduler).doAfterTerminate(new Action0() { @Override public void call() { } } ); }
throw sneakyThrow(new EVCacheException("Exception executing getAndTouch APP " + _appName + ", key = " + canonicalKey, ex)); }).doAfterTerminate(() -> { op.stop(); if (log.isDebugEnabled() && shouldLog()) log.debug("GET_AND_TOUCH : APP " + _appName + ", Took " + op.getDuration() + " milliSec.");
/**
 * Generates a {@link Single} from {@link SQLConnection} operations.
 *
 * @param client the {@link SQLClient}
 * @param sourceSupplier a user-provided function returning a {@link Single} generated by interacting with the given {@link SQLConnection}
 * @param <T> the type of the item emitted by the {@link Single}
 * @return a {@link Single} generated from {@link SQLConnection} operations
 */
public static <T> Single<T> usingConnectionSingle(SQLClient client, Function<SQLConnection, Single<T>> sourceSupplier) {
    return client.rxGetConnection().flatMap(conn -> {
        try {
            // Close the connection once the supplied Single terminates (success or error).
            return sourceSupplier.apply(conn).doAfterTerminate(conn::close);
        } catch (Throwable t) {
            // If the supplier throws synchronously the connection would otherwise never
            // be closed (doAfterTerminate is never attached) — close it and propagate.
            conn.close();
            return Single.error(t);
        }
    });
}
/**
 * Generates a {@link Single} from {@link SQLConnection} operations.
 *
 * @param client the {@link SQLClient}
 * @param sourceSupplier a user-provided function returning a {@link Single} generated by interacting with the given {@link SQLConnection}
 * @param <T> the type of the item emitted by the {@link Single}
 * @return a {@link Single} generated from {@link SQLConnection} operations
 */
public static <T> Single<T> usingConnectionSingle(SQLClient client, Function<SQLConnection, Single<T>> sourceSupplier) {
    // Borrow a connection, delegate to the supplier, and release on termination.
    return client.rxGetConnection()
            .flatMap(conn -> sourceSupplier.apply(conn).doAfterTerminate(conn::close));
}
@Override
public Single<Optional<CartEvent>> retrieveOne(Long id) {
    // Look up a single cart event by id; empty Optional when no row matches.
    return client.rxGetConnection()
            .flatMap(conn -> conn.rxQueryWithParams(RETRIEVE_STATEMENT, new JsonArray().add(id))
                    .map(ResultSet::getRows)
                    .map(rows -> rows.isEmpty()
                            ? Optional.<CartEvent>empty()
                            : Optional.of(rows.get(0)).map(this::wrapCartEvent))
                    .doAfterTerminate(conn::close));
}
.doAfterTerminate(progress::dispose);
public CartEventDataSourceImpl(io.vertx.core.Vertx vertx, JsonObject json) {
    this.client = JDBCClient.createNonShared(Vertx.newInstance(vertx), json);
    // TODO: Should not init the table here.
    // Fire-and-forget schema initialization; the connection is released on termination.
    client.rxGetConnection()
            .flatMap(conn -> conn.rxExecute(INIT_STATEMENT).doAfterTerminate(conn::close))
            .subscribe();
}
return importFile(contentType, f, correlationId, filename, timestamp, layer, tags, properties, fallbackCRSString, contentEncoding) .doAfterTerminate(() -> {
@Override
public Single<Void> save(CartEvent cartEvent) {
    // Fall back to "now" when the event carries no creation timestamp.
    long createdAt = cartEvent.getCreatedAt() > 0
            ? cartEvent.getCreatedAt()
            : System.currentTimeMillis();
    JsonArray params = new JsonArray()
            .add(cartEvent.getCartEventType().name())
            .add(cartEvent.getUserId())
            .add(cartEvent.getProductId())
            .add(cartEvent.getAmount())
            .add(createdAt);
    // Persist the event and release the connection when the update terminates.
    return client.rxGetConnection()
            .flatMap(conn -> conn.rxUpdateWithParams(SAVE_STATEMENT, params)
                    .map(updateResult -> (Void) null)
                    .doAfterTerminate(conn::close));
}
/** * Download and extract Elasticsearch to a given location no matter if the * destination folder exists or not. * @param downloadUrl the URL to the ZIP file containing Elasticsearch * @param destPath the path to the destination folder where the downloaded ZIP * file should be extracted to * @param strip <code>true</code> if the first path element of all items in the * ZIP file should be stripped away. * @return emitting the path to the extracted Elasticsearch */ private Single<String> downloadNoCheck(String downloadUrl, String destPath, boolean strip) { log.info("Downloading Elasticsearch ..."); log.info("Source: " + downloadUrl); log.info("Dest: " + destPath); // download the archive, extract it and finally delete it return downloadArchive(downloadUrl) .flatMap(archivePath -> { return extractArchive(archivePath, destPath, strip) .doAfterTerminate(() -> { FileSystem fs = vertx.fileSystem(); fs.deleteBlocking(archivePath); }); }); }
/**
 * Test if a service is really published only once
 * @param context the test context
 */
@Test
public void publishOnce(TestContext context) {
    Vertx vertx = new Vertx(rule.vertx());
    Async async = context.async();
    ServiceDiscovery discovery = ServiceDiscovery.create(vertx);
    // Publish "A"/"A" twice and "A"/"B" once, then fetch every record;
    // only two records named "A" must exist. Discovery is closed on termination.
    Service.publishOnce("A", "A", discovery, vertx)
            .andThen(Service.publishOnce("A", "A", discovery, vertx))
            .andThen(Service.publishOnce("A", "B", discovery, vertx))
            .andThen(discovery.rxGetRecords(record -> true))
            .doAfterTerminate(discovery::close)
            .subscribe(
                    records -> {
                        List<Record> matches = records.stream()
                                .filter(r -> r.getName().equals("A"))
                                .collect(Collectors.toList());
                        context.assertEquals(2, matches.size());
                        async.complete();
                    },
                    context::fail);
}
publishJobFinished(jobDetails); }) .doAfterTerminate(() -> publishJobFinished(jobDetails)) .toCompletable();
/**
 * Runs the insert + query pipeline inside a single transaction and returns the
 * collected names. When {@code e} is non-null it is injected after collection so the
 * transaction is rolled back instead of committed; in both cases auto-commit is
 * asserted to be restored and the connection is closed on termination.
 */
private Single<List<String>> inTransaction(Exception e) throws Exception {
    return client.rxGetConnection().flatMap(conn -> {
        return rxInsertExtraFolks(conn)
                .andThen(uniqueNames(conn))
                // Gather the emitted names into a single List.
                .<List<String>>collect(ArrayList::new, List::add).toSingle()
                // Optionally inject the caller-supplied failure to force a rollback.
                .compose(single -> e == null ? single : single.flatMap(names -> Single.error(e)))
                // Wrap everything above in begin/commit-or-rollback semantics.
                .compose(SQLClientHelper.txSingleTransformer(conn))
                .flatMap(names -> rxAssertAutoCommit(conn).andThen(Single.just(names)))
                .doAfterTerminate(conn::close);
    });
}
}