/**
 * Iterates over all rows of the underlying query, passing each one to the given handler.
 * <p>
 * The handler may abort the iteration; an optional limit can be applied as well.
 *
 * @param handler the function invoked per row; its boolean result controls continuation
 * @param limit   an optional limit restricting the processed rows (may be <tt>null</tt>)
 * @throws SQLException in case of a database error
 */
@Override
public void iterate(Function<Row, Boolean> handler, @Nullable Limit limit) throws SQLException {
    Watch watch = Watch.start();
    try (ResultSet resultSet = query.prepareStmt().executeQuery()) {
        // Record only the time spent executing the query itself, not the per-row processing.
        query.avarage.addValue(watch.elapsedMillis());
        TaskContext taskContext = TaskContext.get();
        processResultSet(handler, limit, resultSet, taskContext);
    }
}
/**
 * Stores the elapsed time of the current flow in the MDC (under the key "flow").
 * <p>
 * Only performed when diagnostics are of interest: debug logging enabled,
 * development mode or a test run.
 */
private void fixMDC() {
    boolean diagnosticsActive = logger.isDebugEnabled() || Sirius.isDev() || Sirius.isStartedAsTest();
    if (!diagnosticsActive) {
        return;
    }
    CallContext callContext = CallContext.getCurrent();
    MDC.put("flow", "|" + callContext.getWatch().elapsedMillis() + "ms");
}
/**
 * Records call-duration statistics and micro-timings for a "FIND ALL" on the given
 * collection and delegates to {@link #traceIfRequired(String, Watch)}.
 *
 * @param collection the collection which was queried
 * @param watch      the watch started when the query began
 */
private void handleTracingAndReporting(String collection, Watch watch) {
    mongo.callDuration.addValue(watch.elapsedMillis());
    if (Microtiming.isEnabled()) {
        watch.submitMicroTiming(KEY_MONGO, "FIND ALL - " + collection + ": " + filterObject);
    }
    traceIfRequired(collection, watch);
}
/**
 * Updates the global JDBC statistics for the given statement and reports slow queries.
 *
 * @param sql the SQL statement which was executed
 * @param w   the watch started before executing the statement
 */
protected void updateStatistics(String sql, Watch w) {
    w.submitMicroTiming("SQL", sql);
    Databases.numQueries.inc();
    // Sample the elapsed time exactly once: the watch keeps running, so calling
    // elapsedMillis() twice could record one value but compare a (larger) other
    // against the slow-query threshold.
    long elapsedMillis = w.elapsedMillis();
    Databases.queryDuration.addValue(elapsedMillis);
    if (elapsedMillis > Databases.getLogQueryThresholdMillis()) {
        Databases.numSlowQueries.inc();
        DB.SLOW_DB_LOG.INFO("A slow JDBC query was executed (%s): %s\n%s",
                            w.duration(),
                            sql,
                            ExecutionPoint.snapshot().toString());
    }
}
/**
 * Updates the global JDBC statistics for the given statement and reports slow queries.
 *
 * @param sql the SQL statement which was executed
 * @param w   the watch started before executing the statement
 */
protected void updateStatistics(String sql, Watch w) {
    w.submitMicroTiming("SQL", sql);
    Databases.numQueries.inc();
    // Sample the elapsed time exactly once: the watch keeps running, so calling
    // elapsedMillis() twice could record one value but compare a (larger) other
    // against the slow-query threshold.
    long elapsedMillis = w.elapsedMillis();
    Databases.queryDuration.addValue(elapsedMillis);
    if (elapsedMillis > Databases.getLogQueryThresholdMillis()) {
        Databases.numSlowQueries.inc();
        DB.SLOW_DB_LOG.INFO("A slow JDBC query was executed (%s): %s\n%s",
                            w.duration(),
                            sql,
                            ExecutionPoint.snapshot().toString());
    }
}
/**
 * Executes one or more Redis commands and returns a value of the given type.
 * <p>
 * The call is guarded by an {@link Operation} (10s) and its duration is recorded;
 * a micro-timing is submitted if enabled.
 *
 * @param description a description of the actions performed used for debugging and tracing
 * @param task        the actual task to perform using redis
 * @param <T>         the generic type of the result
 * @return a result computed by <tt>task</tt>
 */
public <T> T query(Supplier<String> description, Function<Jedis, T> task) {
    Watch watch = Watch.start();
    try (Operation operation = new Operation(description, Duration.ofSeconds(10));
         Jedis redis = getConnection()) {
        return task.apply(redis);
    } catch (Exception exception) {
        throw Exceptions.handle(Redis.LOG, exception);
    } finally {
        // Duration is recorded on success and failure alike.
        redisInstance.callDuration.addValue(watch.elapsedMillis());
        if (Microtiming.isEnabled()) {
            watch.submitMicroTiming("redis", description.get());
        }
    }
}
/**
 * Executes the collected batch, updating the global JDBC statistics and
 * reporting slow batch executions.
 *
 * @return the per-statement update counts as reported by the delegate
 * @throws SQLException in case of a database error
 */
@Override
public int[] executeBatch() throws SQLException {
    Watch w = Watch.start();
    try (Operation op = new Operation(() -> "executeBatch: " + preparedSQL, Duration.ofSeconds(30))) {
        int[] result = delegate.executeBatch();
        w.submitMicroTiming("BATCH-SQL", preparedSQL);
        Databases.numQueries.inc();
        // Sample the elapsed time exactly once so that the recorded duration and the
        // slow-query threshold check are guaranteed to agree (the watch keeps running).
        long elapsedMillis = w.elapsedMillis();
        Databases.queryDuration.addValue(elapsedMillis);
        if (elapsedMillis > Databases.getLogQueryThresholdMillis()) {
            Databases.numSlowQueries.inc();
            DB.SLOW_DB_LOG.INFO("A slow JDBC batch query was executed (%s): %s (%s rows)\n%s",
                                w.duration(),
                                preparedSQL,
                                result.length,
                                ExecutionPoint.snapshot().toString());
        }
        return result;
    }
}
/**
 * Records trace data for the executed query if tracing is enabled and the query was
 * slow enough, and logs it as a slow query if it exceeds the configured threshold.
 *
 * @param collection the collection which was queried
 * @param w          the watch started when the query began
 */
protected void traceIfRequired(String collection, Watch w) {
    // Sample the elapsed time once: previously the slow-query check re-read the
    // (still running) watch AFTER explain() had executed, so the explain overhead
    // was wrongly counted towards the query duration.
    long elapsedMillis = w.elapsedMillis();
    if (mongo.tracing && elapsedMillis >= mongo.traceLimit) {
        String location = determineLocation();
        Doc explanation = explain(collection);
        mongo.traceData.put(location,
                            Tuple.create(collection + ": " + filterObject.toString() + " [" + w.duration() + "]",
                                         explanation.toString()));
    }
    if (elapsedMillis > mongo.getLogQueryThresholdMillis()) {
        mongo.numSlowQueries.inc();
        DB.SLOW_DB_LOG.INFO("A slow MongoDB query was executed (%s): %s\n%s\n%s",
                            w.duration(),
                            collection,
                            filterObject,
                            ExecutionPoint.snapshot().toString());
    }
}
/**
 * Counts the number of documents in the result of the given query.
 * <p>
 * Note that limits are ignored for this query.
 *
 * @param collection the collection to search in
 * @return the number of documents found
 */
public long countIn(String collection) {
    Watch watch = Watch.start();
    try {
        return mongo.db().getCollection(collection).count(filterObject);
    } finally {
        // Statistics and tracing are recorded even if the count fails.
        mongo.callDuration.addValue(watch.elapsedMillis());
        if (Microtiming.isEnabled()) {
            watch.submitMicroTiming(KEY_MONGO, "COUNT - " + collection + ": " + filterObject);
        }
        traceIfRequired(collection, watch);
    }
}
/**
 * Executes the batched statements if any are pending.
 * <p>
 * On failure the surrounding context is optionally closed before the error is re-thrown
 * as a handled exception.
 *
 * @param cascade whether to close the surrounding context if the batch execution fails
 */
protected void tryCommit(boolean cascade) {
    if (stmt == null) {
        return;
    }
    if (batchBacklog > 0) {
        try {
            Watch w = Watch.start();
            stmt.executeBatch();
            avarage.addValues(batchBacklog, w.elapsedMillis());
            batchBacklog = 0;
        } catch (SQLException e) {
            if (cascade) {
                context.safeClose();
            }
            // Fixed typo in the error message: "occured" -> "occurred".
            throw Exceptions.handle()
                            .to(OMA.LOG)
                            .error(e)
                            .withSystemErrorMessage("An error occurred while batch executing a statement: %s (%s)")
                            .handle();
        }
    }
}
@Override public void close() throws SQLException { try (Operation op = new Operation(() -> database.name + ".close()", Duration.ofSeconds(5))) { delegate.close(); } catch (SQLException e) { // Most likely this exception will be a false alert because DBCP 1.2.2 cannot deal with connections which // are closed by their driver (due to network issues). // The next release of DBCP will fix this problem. The exception is logged at INFO level in case a "real" // problem occurred. If we wouldn't call delegate.close, the connection would remain active and might block // the pool. Databases.LOG.INFO("Error closing connection"); Databases.LOG.INFO(e); } finally { watch.submitMicroTiming("SQL", "Connection Duration: " + database.name); if (watch.elapsedMillis() > Databases.getLogConnectionThresholdMillis()) { DB.SLOW_DB_LOG.INFO("A long running connection was detected (%s): Opened:\n%s\n\nClosed:\n%s", watch.duration(), connected.toString(), ExecutionPoint.snapshot().toString()); } } }
/**
 * Executes the insert statement into the given collection.
 *
 * @param collection the collection to insert the document into
 * @return the inserted document
 */
public Doc into(String collection) {
    if (Mongo.LOG.isFINE()) {
        Mongo.LOG.FINE("INSERT: %s\nObject: %s", collection, obj);
    }
    Watch watch = Watch.start();
    mongo.db().getCollection(collection).insertOne(obj);
    mongo.callDuration.addValue(watch.elapsedMillis());
    if (Microtiming.isEnabled()) {
        watch.submitMicroTiming("mongo", "INSERT - " + collection + ": " + obj);
    }
    return new Doc(obj);
}
}
mongo.callDuration.addValue(w.elapsedMillis()); if (Microtiming.isEnabled()) { w.submitMicroTiming(KEY_MONGO, "FIND ONE - " + collection + ": " + filterObject);
/**
 * Executes the delete statement on the given collection.
 *
 * @param collection the name of the collection to delete documents from
 * @return the result of the delete operation
 */
public DeleteResult singleFrom(String collection) {
    Watch watch = Watch.start();
    try {
        if (Mongo.LOG.isFINE()) {
            Mongo.LOG.FINE("DELETE: %s\nFilter: %s", collection, filterObject);
        }
        return mongo.db().getCollection(collection).deleteOne(filterObject);
    } finally {
        // Statistics and tracing are recorded even if the delete fails.
        mongo.callDuration.addValue(watch.elapsedMillis());
        if (Microtiming.isEnabled()) {
            watch.submitMicroTiming("mongo", "DELETE - " + collection + ": " + filterObject);
        }
        traceIfRequired(collection, watch);
    }
}
/**
 * Executes the delete statement on the given collection.
 *
 * @param collection the name of the collection to delete documents from
 * @return the result of the delete operation
 */
public DeleteResult manyFrom(String collection) {
    Watch watch = Watch.start();
    try {
        if (Mongo.LOG.isFINE()) {
            Mongo.LOG.FINE("DELETE: %s\nFilter: %s", collection, filterObject);
        }
        return mongo.db().getCollection(collection).deleteMany(filterObject);
    } finally {
        // Statistics and tracing are recorded even if the delete fails.
        mongo.callDuration.addValue(watch.elapsedMillis());
        if (Microtiming.isEnabled()) {
            watch.submitMicroTiming("mongo", "DELETE - " + collection + ": " + filterObject);
        }
        traceIfRequired(collection, watch);
    }
}
}
/**
 * Executes the wrapped runnable within a properly initialized {@link CallContext},
 * fulfilling the associated promise and recording the execution duration.
 */
@Override
public void run() {
    try {
        Watch watch = Watch.start();
        try {
            // Either adopt the forked context or start a fresh one for this thread.
            if (ctx == null) {
                CallContext.initialize();
            } else {
                CallContext.setCurrent(ctx);
            }
            TaskContext.get()
                       .setSystem(SYSTEM_ASYNC)
                       .setSubSystem(category)
                       .setJob(String.valueOf(jobNumber));
            runnable.run();
            promise.success(null);
        } finally {
            CallContext.detach();
            durationAverage.addValue(watch.elapsedMillis());
        }
    } catch (Exception e) {
        Exceptions.handle(Tasks.LOG, e);
        promise.fail(e);
    }
}
/**
 * Executes the update on the given collection.
 *
 * @param collection the collection to update
 * @return the result of the update
 */
public UpdateResult executeFor(String collection) {
    Document updateObject = prepareUpdate(collection);
    Watch watch = Watch.start();
    try {
        if (Mongo.LOG.isFINE()) {
            Mongo.LOG.FINE("UPDATE: %s\nFilter: %s\n Update:%s", collection, filterObject, updateObject);
        }
        UpdateOptions updateOptions = new UpdateOptions().upsert(this.upsert);
        if (many) {
            return mongo.db().getCollection(collection).updateMany(filterObject, updateObject, updateOptions);
        }
        return mongo.db().getCollection(collection).updateOne(filterObject, updateObject, updateOptions);
    } finally {
        // Statistics and tracing are recorded even if the update fails.
        mongo.callDuration.addValue(watch.elapsedMillis());
        if (Microtiming.isEnabled()) {
            watch.submitMicroTiming("mongo", "UPDATE - " + collection + ": " + filterObject);
        }
        traceIfRequired(collection, watch);
    }
}
/**
 * Executes the prepared index request and applies the resulting id and version
 * to the entity, invoking save checks and change tracing as appropriate.
 *
 * @param entity        the entity being saved
 * @param descriptor    the descriptor of the entity type
 * @param irb           the fully prepared index request
 * @param runSaveChecks whether {@code afterSave()} should be invoked
 * @param <E>           the generic entity type
 * @return the updated entity
 */
private <E extends Entity> E executeUpdate(E entity,
                                           EntityDescriptor descriptor,
                                           IndexRequestBuilder irb,
                                           final boolean runSaveChecks) {
    Watch watch = Watch.start();
    IndexResponse response = irb.execute().actionGet();
    if (LOG.isFINE()) {
        LOG.FINE("SAVE: %s.%s: %s (%d) SUCCEEDED",
                 schema.getIndex(entity),
                 descriptor.getType(),
                 response.getId(),
                 response.getVersion());
    }
    // Propagate the server-assigned id and version back into the entity.
    entity.id = response.getId();
    entity.version = response.getVersion();
    if (runSaveChecks) {
        entity.afterSave();
    }
    queryDuration.addValue(watch.elapsedMillis());
    watch.submitMicroTiming("ES", "UPDATE " + entity.getClass().getName());
    traceChange(entity);
    return entity;
}
/**
 * Dispatches a received pub/sub message to the given subscriber on the
 * "redis-pubsub" executor, recording timing statistics.
 *
 * @param channel    the channel the message was received on
 * @param message    the received payload
 * @param subscriber the subscriber to notify
 */
protected void handlePubSubMessage(String channel, String message, Subscriber subscriber) {
    tasks.executor("redis-pubsub").start(() -> {
        Watch watch = Watch.start();
        try {
            subscriber.onMessage(message);
        } catch (Exception exception) {
            // Note: the trailing "%s (%s)" is filled in from the handled exception.
            Exceptions.handle()
                      .to(LOG)
                      .error(exception)
                      .withSystemErrorMessage("Failed to process a message '%s' for topic '%s': %s (%s)",
                                              message,
                                              subscriber.getTopic())
                      .handle();
        }
        watch.submitMicroTiming("redis", channel);
        messageDuration.addValue(watch.elapsedMillis());
    });
}
/**
 * Executes the prepared bulk request and applies the resulting ids and versions
 * to the given entities (in request order), tracing each change.
 *
 * @param entities the entities being saved, in the same order as the bulk items
 * @param brb      the fully prepared bulk request
 * @param <E>      the generic entity type
 * @return the list of updated entities
 */
private <E extends Entity> List<E> executeBulkUpdate(List<E> entities, BulkRequestBuilder brb) {
    Watch watch = Watch.start();
    BulkResponse response = brb.execute().actionGet();
    if (!response.hasFailures() && LOG.isFINE()) {
        LOG.FINE("BULK-SAVE SUCCEEDED");
    } else if (response.hasFailures()) {
        Exceptions.handle().withSystemErrorMessage(response.buildFailureMessage()).handle();
    }
    // The i-th bulk item corresponds to the i-th entity in the input list.
    for (int index = 0; index < response.getItems().length; index++) {
        E entity = entities.get(index);
        entity.id = response.getItems()[index].getId();
        entity.version = response.getItems()[index].getVersion();
        if (!response.getItems()[index].isFailed()) {
            entity.afterSave();
        }
        traceChange(entity);
    }
    queryDuration.addValue(watch.elapsedMillis());
    watch.submitMicroTiming("ES", "BULK-UPDATE");
    return entities;
}