/**
 * Executes the query as update.
 * <p>
 * Requires the SQL statement to be an UPDATE or DELETE statement.
 *
 * @return the number of rows changed
 * @throws SQLException in case of a database error
 */
public int executeUpdate() throws SQLException {
    Watch watch = Watch.start();
    // A null resource is simply skipped by try-with-resources, so a null statement is safe here.
    try (Connection connection = ds.getConnection();
         PreparedStatement stmt = createPreparedStatement(connection)) {
        return stmt == null ? 0 : stmt.executeUpdate();
    } finally {
        // Report the statement duration to the microtiming framework in any case.
        watch.submitMicroTiming(MICROTIMING_KEY, sql);
    }
}
/**
 * Updates the mongo call statistics, reports the query to the microtiming framework
 * and records a trace if tracing is active for the given collection.
 */
private void handleTracingAndReporting(String collection, Watch watch) {
    mongo.callDuration.addValue(watch.elapsedMillis());
    if (Microtiming.isEnabled()) {
        watch.submitMicroTiming(KEY_MONGO, "FIND ALL - " + collection + ": " + filterObject);
    }
    traceIfRequired(collection, watch);
}
/**
 * Executes one or more Redis commands and returns a value of the given type.
 *
 * @param description a description of the actions performed used for debugging and tracing
 * @param task        the actual task to perform using redis
 * @param <T>         the generic type of the result
 * @return a result computed by <tt>task</tt>
 */
public <T> T query(Supplier<String> description, Function<Jedis, T> task) {
    Watch watch = Watch.start();
    try (Operation op = new Operation(description, Duration.ofSeconds(10));
         Jedis jedis = getConnection()) {
        return task.apply(jedis);
    } catch (Exception e) {
        throw Exceptions.handle(Redis.LOG, e);
    } finally {
        // Update call statistics and report the operation to the microtiming framework
        // no matter whether the task succeeded or failed.
        redisInstance.callDuration.addValue(watch.elapsedMillis());
        if (Microtiming.isEnabled()) {
            watch.submitMicroTiming("redis", description.get());
        }
    }
}
@Override
public void iterate(Function<Row, Boolean> handler, @Nullable Limit limit) throws SQLException {
    Watch watch = Watch.start();
    // Reset the cached column names as a new result set is about to be processed.
    fieldNames = null;
    try (Connection connection = ds.getConnection();
         PreparedStatement stmt = createPreparedStatement(connection)) {
        if (stmt == null) {
            return;
        }
        Limit effectiveLimit = (limit == null) ? Limit.UNLIMITED : limit;
        applyMaxRows(stmt, effectiveLimit);
        applyFetchSize(stmt, effectiveLimit);
        try (ResultSet resultSet = stmt.executeQuery()) {
            // Only the query execution itself is reported as microtiming, not the row processing.
            watch.submitMicroTiming(MICROTIMING_KEY, sql);
            processResultSet(handler, effectiveLimit, resultSet, TaskContext.get());
        }
    }
}
@Override
public void truncate() {
    Watch watch = Watch.start();
    Compiler compiler = compileDELETE();
    try {
        try (Connection connection = db.getConnection();
             PreparedStatement stmt = compiler.prepareStatement(connection)) {
            stmt.executeUpdate();
        } finally {
            // Report the compiled statement even if its execution failed.
            if (Microtiming.isEnabled()) {
                watch.submitMicroTiming("OMA", compiler.toString());
            }
        }
    } catch (Exception e) {
        throw queryError(compiler, e);
    }
}
/**
 * Deletes all matching entities, optionally skipping consistency checks.
 */
private void delete(boolean force) {
    try {
        // Deliberate short-circuit used to simulate failures (e.g. in tests).
        if (forceFail) {
            return;
        }
        Watch watch = Watch.start();
        deleteByIteration(force);
        if (Microtiming.isEnabled()) {
            watch.submitMicroTiming("ES", "DELETE: " + toString(true));
        }
    } catch (Exception e) {
        throw Exceptions.handle(IndexAccess.LOG, e);
    }
}
/**
 * Counts the number of documents in the result of the given query.
 * <p>
 * Note that limits are ignored for this query.
 *
 * @param collection the collection to search in
 * @return the number of documents found
 */
public long countIn(String collection) {
    Watch watch = Watch.start();
    try {
        return mongo.db().getCollection(collection).count(filterObject);
    } finally {
        // Update monitoring and tracing data no matter whether the count succeeded or failed.
        mongo.callDuration.addValue(watch.elapsedMillis());
        if (Microtiming.isEnabled()) {
            watch.submitMicroTiming(KEY_MONGO, "COUNT - " + collection + ": " + filterObject);
        }
        traceIfRequired(collection, watch);
    }
}
@Override
public void close() throws SQLException {
    // Wrap the close in an Operation so that connections which take suspiciously long
    // to close (more than 5s) show up in the operations monitoring.
    try (Operation op = new Operation(() -> database.name + ".close()", Duration.ofSeconds(5))) {
        delegate.close();
    } catch (SQLException e) {
        // Most likely this exception will be a false alert because DBCP 1.2.2 cannot deal with connections which
        // are closed by their driver (due to network issues).
        // The next release of DBCP will fix this problem. The exception is logged at INFO level in case a "real"
        // problem occurred. If we wouldn't call delegate.close, the connection would remain active and might block
        // the pool.
        Databases.LOG.INFO("Error closing connection");
        Databases.LOG.INFO(e);
    } finally {
        // Report how long this connection was held in total...
        watch.submitMicroTiming("SQL", "Connection Duration: " + database.name);
        // ...and log long running connections along with the stack traces of where they
        // were opened and where they were closed, to help find connection hogs.
        if (watch.elapsedMillis() > Databases.getLogConnectionThresholdMillis()) {
            DB.SLOW_DB_LOG.INFO("A long running connection was detected (%s): Opened:\n%s\n\nClosed:\n%s",
                                watch.duration(),
                                connected.toString(),
                                ExecutionPoint.snapshot().toString());
        }
    }
}
/**
 * Executes the insert statement into the given collection.
 *
 * @param collection the collection to insert the document into
 * @return the inserted document
 */
public Doc into(String collection) {
    if (Mongo.LOG.isFINE()) {
        Mongo.LOG.FINE("INSERT: %s\nObject: %s", collection, obj);
    }
    Watch watch = Watch.start();
    mongo.db().getCollection(collection).insertOne(obj);
    // Record call statistics and report the insert to the microtiming framework.
    mongo.callDuration.addValue(watch.elapsedMillis());
    if (Microtiming.isEnabled()) {
        watch.submitMicroTiming("mongo", "INSERT - " + collection + ": " + obj);
    }
    return new Doc(obj);
}
}
@Override
public int[] executeBatch() throws SQLException {
    Watch watch = Watch.start();
    // Monitor the batch execution so long running batches show up in the operations monitoring.
    try (Operation op = new Operation(() -> "executeBatch: " + preparedSQL, Duration.ofSeconds(30))) {
        int[] result = delegate.executeBatch();
        watch.submitMicroTiming("BATCH-SQL", preparedSQL);
        Databases.numQueries.inc();
        Databases.queryDuration.addValue(watch.elapsedMillis());
        // Slow batches are additionally counted and logged along with their origin.
        if (watch.elapsedMillis() > Databases.getLogQueryThresholdMillis()) {
            Databases.numSlowQueries.inc();
            DB.SLOW_DB_LOG.INFO("A slow JDBC batch query was executed (%s): %s (%s rows)\n%s",
                                watch.duration(),
                                preparedSQL,
                                result.length,
                                ExecutionPoint.snapshot().toString());
        }
        return result;
    }
}
@Override
public long count() {
    Watch watch = Watch.start();
    Compiler compiler = compileCOUNT();
    try {
        try (Connection connection = db.getConnection()) {
            return execCount(compiler, connection);
        } finally {
            // Report the compiled statement even if its execution failed.
            if (Microtiming.isEnabled()) {
                watch.submitMicroTiming("OMA", compiler.toString());
            }
        }
    } catch (Exception e) {
        throw queryError(compiler, e);
    }
}
/**
 * Executes the delete statement on the given collection.
 *
 * @param collection the name of the collection to delete documents from
 * @return the result of the delete operation
 */
public DeleteResult singleFrom(String collection) {
    Watch watch = Watch.start();
    try {
        if (Mongo.LOG.isFINE()) {
            Mongo.LOG.FINE("DELETE: %s\nFilter: %s", collection, filterObject);
        }
        return mongo.db().getCollection(collection).deleteOne(filterObject);
    } finally {
        // Update monitoring and tracing data no matter whether the delete succeeded or failed.
        mongo.callDuration.addValue(watch.elapsedMillis());
        if (Microtiming.isEnabled()) {
            watch.submitMicroTiming("mongo", "DELETE - " + collection + ": " + filterObject);
        }
        traceIfRequired(collection, watch);
    }
}
/**
 * Internal execution of the query along with the transformation of the result for a count of entities
 *
 * @param builder the completed query
 * @return the result of the query
 */
protected long transformCount(SearchRequestBuilder builder) {
    Watch watch = Watch.start();
    SearchResponse response = builder.execute().actionGet();
    long totalHits = response.getHits().getTotalHits();
    if (IndexAccess.LOG.isFINE()) {
        IndexAccess.LOG.FINE("COUNT: %s.%s: SUCCESS: %d",
                             indexAccess.getIndex(clazz),
                             indexAccess.getDescriptor(clazz).getType(),
                             totalHits);
    }
    if (Microtiming.isEnabled()) {
        watch.submitMicroTiming("ES", "COUNT: " + toString(true));
    }
    return totalHits;
}
/**
 * Executes the delete statement on the given collection.
 *
 * @param collection the name of the collection to delete documents from
 * @return the result of the delete operation
 */
public DeleteResult manyFrom(String collection) {
    Watch watch = Watch.start();
    try {
        if (Mongo.LOG.isFINE()) {
            Mongo.LOG.FINE("DELETE: %s\nFilter: %s", collection, filterObject);
        }
        return mongo.db().getCollection(collection).deleteMany(filterObject);
    } finally {
        // Update monitoring and tracing data no matter whether the delete succeeded or failed.
        mongo.callDuration.addValue(watch.elapsedMillis());
        if (Microtiming.isEnabled()) {
            watch.submitMicroTiming("mongo", "DELETE - " + collection + ": " + filterObject);
        }
        traceIfRequired(collection, watch);
    }
}
}
@Override
public void iterate(Function<E, Boolean> handler) {
    Compiler compiler = compileSELECT();
    try {
        Watch watch = Watch.start();
        try (Connection connection = db.getConnection();
             PreparedStatement stmt = compiler.prepareStatement(connection)) {
            Limit limit = getLimit();
            // If the database can limit result sets natively, the limit is pushed into the statement.
            boolean nativeLimit = db.hasCapability(Capability.LIMIT);
            tuneStatement(stmt, limit, nativeLimit);
            try (ResultSet resultSet = stmt.executeQuery()) {
                execIterate(handler, compiler, limit, nativeLimit, resultSet);
            }
        } finally {
            // Report the compiled statement even if its execution failed.
            if (Microtiming.isEnabled()) {
                watch.submitMicroTiming("OMA", compiler.toString());
            }
        }
    } catch (Exception e) {
        throw queryError(compiler, e);
    }
}
/**
 * Reports the executed statement to the microtiming framework and updates the
 * global query statistics, logging slow queries along with their origin.
 */
protected void updateStatistics(String sql, Watch w) {
    w.submitMicroTiming("SQL", sql);
    Databases.numQueries.inc();
    Databases.queryDuration.addValue(w.elapsedMillis());
    if (w.elapsedMillis() > Databases.getLogQueryThresholdMillis()) {
        Databases.numSlowQueries.inc();
        DB.SLOW_DB_LOG.INFO("A slow JDBC query was executed (%s): %s\n%s",
                            w.duration(),
                            sql,
                            ExecutionPoint.snapshot().toString());
    }
}
// Reports the executed statement to the microtiming framework and updates the global
// query statistics. Slow statements (above the configured threshold) are additionally
// counted and logged along with the location which issued them.
protected void updateStatistics(String sql, Watch w) {
    w.submitMicroTiming("SQL", sql);
    Databases.numQueries.inc();
    Databases.queryDuration.addValue(w.elapsedMillis());
    if (w.elapsedMillis() > Databases.getLogQueryThresholdMillis()) {
        Databases.numSlowQueries.inc();
        DB.SLOW_DB_LOG.INFO("A slow JDBC query was executed (%s): %s\n%s",
                            w.duration(),
                            sql,
                            ExecutionPoint.snapshot().toString());
    }
}
/**
 * Executes the given index request and transfers the resulting id and version
 * back into the entity, optionally running the after-save checks.
 */
private <E extends Entity> E executeUpdate(E entity,
                                           EntityDescriptor descriptor,
                                           IndexRequestBuilder irb,
                                           final boolean runSaveChecks) {
    Watch watch = Watch.start();
    IndexResponse indexResponse = irb.execute().actionGet();
    if (LOG.isFINE()) {
        LOG.FINE("SAVE: %s.%s: %s (%d) SUCCEEDED",
                 schema.getIndex(entity),
                 descriptor.getType(),
                 indexResponse.getId(),
                 indexResponse.getVersion());
    }
    // Transfer the id and version assigned by elasticsearch back into the entity.
    entity.id = indexResponse.getId();
    entity.version = indexResponse.getVersion();
    if (runSaveChecks) {
        entity.afterSave();
    }
    queryDuration.addValue(watch.elapsedMillis());
    watch.submitMicroTiming("ES", "UPDATE " + entity.getClass().getName());
    traceChange(entity);
    return entity;
}
// Dispatches an incoming pub-sub message to the given subscriber on the "redis-pubsub"
// executor so that the Jedis listener thread is never blocked by a slow subscriber.
protected void handlePubSubMessage(String channel, String message, Subscriber subscriber) {
    tasks.executor("redis-pubsub").start(() -> {
        Watch w = Watch.start();
        try {
            subscriber.onMessage(message);
        } catch (Exception e) {
            // NOTE(review): the pattern contains four %s placeholders but only two arguments are
            // supplied — presumably the trailing "%s (%s)" is filled in by the framework from
            // error(e) (exception message and type); confirm against the Exceptions API.
            Exceptions.handle()
                      .to(LOG)
                      .error(e)
                      .withSystemErrorMessage("Failed to process a message '%s' for topic '%s': %s (%s)",
                                              message,
                                              subscriber.getTopic())
                      .handle();
        }
        // Timing is recorded even if the subscriber failed.
        w.submitMicroTiming("redis", channel);
        messageDuration.addValue(w.elapsedMillis());
    });
}
// Executes the given bulk request and transfers the per-item results (id, version)
// back into the corresponding entities. Failed items are reported but do not abort
// processing of the items which succeeded.
private <E extends Entity> List<E> executeBulkUpdate(List<E> entities, BulkRequestBuilder brb) {
    Watch w = Watch.start();
    BulkResponse indexResponse = brb.execute().actionGet();
    if (!indexResponse.hasFailures() && LOG.isFINE()) {
        LOG.FINE("BULK-SAVE SUCCEEDED");
    } else if (indexResponse.hasFailures()) {
        // Reports the collected failure messages; note that this only logs/handles the
        // error — the loop below still processes the items which did succeed.
        Exceptions.handle().withSystemErrorMessage(indexResponse.buildFailureMessage()).handle();
    }
    // The i-th response item corresponds to the i-th entity of the bulk request.
    for (int i = 0; i < indexResponse.getItems().length; i++) {
        E entity = entities.get(i);
        entity.id = indexResponse.getItems()[i].getId();
        entity.version = indexResponse.getItems()[i].getVersion();
        // afterSave is only invoked for items which were actually written.
        if (!indexResponse.getItems()[i].isFailed()) {
            entity.afterSave();
        }
        traceChange(entity);
    }
    queryDuration.addValue(w.elapsedMillis());
    w.submitMicroTiming("ES", "BULK-UPDATE");
    return entities;
}