/** * Process add/remove/update of an incoming profile. */ public void processIncoming(ClusterDistributionManager dm, String adviseePath, boolean removeProfile, boolean exchangeProfiles, final List<Profile> replyProfiles) { // nothing by default; just log that nothing was done if (logger.isDebugEnabled()) { logger.debug("While processing UpdateAttributes message ignored incoming profile: {}", this); } }
/**
 * Records a received config entry and counts down the completion latch.
 *
 * <p>Logs an error if the latch has already reached zero, which would indicate
 * a duplicate or unexpected callback for this id.
 *
 * @param id identifier of the config entry that arrived
 * @param settings version/settings tuple received for that id
 */
@Override
public void success(String id, Tuple<Long, Settings> settings) {
  final String eventsText = Arrays.toString(events);
  // A non-positive count means every expected callback already arrived.
  if (latch.getCount() <= 0) {
    log.error("Latch already counted down (for {} of {}) (index={})",
        id, eventsText, searchguardIndex);
  }
  rs.put(id, settings);
  latch.countDown();
  if (log.isDebugEnabled()) {
    log.debug("Received config for {} (of {}) with current latch value={}",
        id, eventsText, latch.getCount());
  }
}
/**
 * Releases the distributed lock guarding the partitioned-region ID counter.
 *
 * <p>Best-effort: any failure to unlock is logged at warn level rather than
 * propagated, so callers in cleanup paths are never disrupted.
 *
 * @param lockService the distributed lock service holding the PRID lock
 */
private static void releasePRIDLock(final DistributedLockService lockService) {
  try {
    lockService.unlock(PartitionedRegionHelper.MAX_PARTITIONED_REGION_ID);
    if (logger.isDebugEnabled()) {
      logger.debug("releasePRIDLock: Released the dlock in allPartitionedRegions for {}",
          PartitionedRegionHelper.MAX_PARTITIONED_REGION_ID);
    }
  } catch (Exception es) {
    // Fix: no need to box explicitly — String.format("%s", int) auto-boxes.
    logger.warn(String.format("releasePRIDLock: unlocking %s caught an exception",
        PartitionedRegionHelper.MAX_PARTITIONED_REGION_ID), es);
  }
}
@Override public void close(boolean keepAlive) { if (logger.isDebugEnabled()) { logger.debug("Shutting down connection manager with keepAlive {}", keepAlive); if (!this.loadConditioningProcessor.awaitTermination(PoolImpl.SHUTDOWN_TIMEOUT, TimeUnit.MILLISECONDS)) { logger.warn("Timeout waiting for load conditioning tasks to complete"); logger.error("Error stopping loadConditioningProcessor", e); } catch (InterruptedException e) { logger.error( "Interrupted stopping loadConditioningProcessor", e);
/**
 * Logs a timed SQL statement at a level chosen from the configured timing
 * thresholds: error when at/over the error threshold, warn when at/over the
 * warn threshold, otherwise info. Statements filtered out by the SQL-operation
 * filter are dropped entirely.
 *
 * @param spy the spy wrapper that executed the statement
 * @param execTime execution time in milliseconds
 * @param methodCall description of the JDBC call made
 * @param sql the SQL text that was executed
 */
@Override public void sqlTimingOccurred(Spy spy, long execTime, String methodCall, String sql) {
  //test useless in the current implementation,
  //as if error level is not enabled for this logger,
  //the ConnectionSpy will not be used (see isjdbcLoggingEnabled())
  //might maybe change one day?
  /*if (!LOGGER.isErrorEnabled()) { return; }*/
  String operation = this.getSqlOperation(sql);
  // Skip statements excluded by the operation filter (e.g. selects only).
  if (Properties.isDumpSqlFilteringOn() && !this.shouldSqlBeLogged(operation)) {
    return;
  }
  Marker marker = this.getStatementMarker(operation);
  // Message is built lazily; debug flag controls how much detail it renders.
  SqlTimingOccurredMessage message =
      new SqlTimingOccurredMessage(spy, execTime, methodCall, sql, LOGGER.isDebugEnabled(marker));
  if (Properties.isSqlTimingErrorThresholdEnabled()
      && execTime >= Properties.getSqlTimingErrorThresholdMsec()) {
    LOGGER.error(marker, message);
  } else if (LOGGER.isWarnEnabled()) {
    // NOTE(review): info output is intentionally gated on isWarnEnabled here —
    // below-warn-threshold timings are suppressed when warn logging is off.
    if (Properties.isSqlTimingWarnThresholdEnabled()
        && execTime >= Properties.getSqlTimingWarnThresholdMsec()) {
      LOGGER.warn(marker, message);
    } else {
      LOGGER.info(marker, message);
    }
  }
}
/**
 * Handle rejected execution for a function execution thread. Spin off a thread directly in
 * this case, since that means a function is executing another function. The child function
 * request shouldn't be in the queue behind the parent request since the parent function is
 * dependent on the child function executing.
 *
 * @param r the rejected task to run on a newly launched thread
 * @param executor the pool that rejected the task
 */
private void handleRejectedExecutionForFunctionExecutionThread(Runnable r,
    ThreadPoolExecutor executor) {
  // Fix: the original logged at WARN inside an isDebugEnabled() guard — a
  // guard/level mismatch. Log at DEBUG to match the guard (preserving the
  // original's production log volume), and use parameterized logging instead
  // of string concatenation.
  if (logger.isDebugEnabled()) {
    logger.debug(
        "An additional {} thread is being launched to prevent slow performance due to nested function executions",
        FUNCTION_EXECUTION_PROCESSOR_THREAD_PREFIX);
  }
  launchAdditionalThread(r, executor);
}
@Override public void onDisconnect(InternalDistributedSystem sys) { if (logger.isDebugEnabled()) { this.logger.debug("Calling AdminDistributedSystemJmxImpl#onDisconnect"); this.mbeanName, notificationSequenceNumber.addAndGet(1), null)); } catch (MBeanException e) { logger.warn(e.getMessage(), e); logger.warn(e.getMessage(), e); throw e; } catch (VirtualMachineError err) { logger.error(e.getMessage(), e); throw e; if (logger.isDebugEnabled()) { this.logger.debug("Completed AdminDistributedSystemJmxImpl#onDisconnect");
/**
 * Instantiates the configured {@link InterClusterRequestEvaluator} by reflection.
 *
 * <p>Falls back to {@code DefaultInterClusterRequestEvaluator} on any failure,
 * logging the cause at warn level (with the stack trace at debug level).
 *
 * @param clazz fully-qualified class name of the evaluator implementation
 * @param settings settings passed to the evaluator's constructor
 * @return the instantiated evaluator, or the default one on failure
 */
public static InterClusterRequestEvaluator instantiateInterClusterRequestEvaluator(
    final String clazz, final Settings settings) {
  try {
    final Class<?> evaluatorClass = Class.forName(clazz);
    final InterClusterRequestEvaluator evaluator = (InterClusterRequestEvaluator)
        evaluatorClass.getConstructor(Settings.class).newInstance(settings);
    addLoadedModule(evaluatorClass);
    return evaluator;
  } catch (final Throwable e) {
    log.warn("Unable to load inter cluster request evaluator '{}' due to {}", clazz, e.toString());
    if (log.isDebugEnabled()) {
      log.debug("Stacktrace: ", e);
    }
    return new DefaultInterClusterRequestEvaluator(settings);
  }
}
/**
 * Records a received config entry and counts down the completion latch.
 *
 * <p>Logs an error if the latch has already reached zero, which would indicate
 * a duplicate or unexpected callback for this type.
 *
 * @param type config type that arrived
 * @param settings version/settings tuple received for that type
 */
@Override
public void success(String type, Tuple<Long, Settings> settings) {
  final String eventsText = Arrays.toString(events);
  // A non-positive count means every expected callback already arrived.
  if (latch.getCount() <= 0) {
    log.error("Latch already counted down (for {} of {}) (index={})",
        type, eventsText, searchguardIndex);
  }
  rs.put(type, settings);
  latch.countDown();
  if (log.isDebugEnabled()) {
    log.debug("Received config for {} (of {}) with current latch value={}",
        type, eventsText, latch.getCount());
  }
}
/**
 * Decrements the live-endpoint counter when an endpoint is retired.
 *
 * @param endpoint the endpoint no longer in use (not inspected here)
 */
@Override
public void endpointNoLongerInUse(Endpoint endpoint) {
  final int remaining = endpointCount.decrementAndGet();
  if (logger.isDebugEnabled()) {
    logger.debug("InstantiatorRecoveryTask - EndpointNoLongerInUse. Now have {} endpoints",
        remaining);
  }
}
/**
 * Disconnects from the distributed system, first delegating to the superclass,
 * then persisting StatAlert definitions and detaching the cache/region listener.
 *
 * <p>Follows the SystemFailure error-handling protocol: VirtualMachineError is
 * reported to SystemFailure and rethrown; other Errors are rethrown only after
 * checking that the JVM is still usable. The catch order below is significant.
 */
@Override public void disconnect() {
  try {
    super.disconnect();
    // Save existing StatAlert Definitions
    saveAlertDefinitionsAsSerializedObjects();
    /* Remove Cache Listener to listen to Cache & Region create/destroy events */
    if (logger.isDebugEnabled()) {
      logger.debug("Removing CacheAndRegionListener .... ");
    }
    removeCacheListener(cacheRegionListener);
  } catch (RuntimeException e) {
    // Log and rethrow — caller decides how to handle runtime failures.
    logger.warn(e.getMessage(), e);
    throw e;
  } catch (VirtualMachineError err) {
    SystemFailure.initiateFailure(err);
    // If this ever returns, re-throw the error. We're poisoned
    // now, so don't let this thread continue.
    throw err;
  } catch (Error e) {
    // Whenever you catch Error or Throwable, you must also
    // catch VirtualMachineError (see above). However, there is
    // _still_ a possibility that you are dealing with a cascading
    // error condition, so you also need to check to see if the JVM
    // is still usable:
    SystemFailure.checkFailure();
    logger.error(e.getMessage(), e);
    throw e;
  }
}
/**
 * Instantiates the configured {@link PrincipalExtractor} by reflection.
 *
 * <p>Falls back to {@code DefaultPrincipalExtractor} on any failure, logging
 * the cause at warn level (with the stack trace at debug level).
 *
 * @param clazz fully-qualified class name of the extractor implementation
 * @return the instantiated extractor, or the default one on failure
 */
public static PrincipalExtractor instantiatePrincipalExtractor(final String clazz) {
  try {
    final Class<?> clazz0 = Class.forName(clazz);
    final PrincipalExtractor ret = (PrincipalExtractor) clazz0.newInstance();
    addLoadedModule(clazz0);
    return ret;
  } catch (final Throwable e) {
    // Fix: corrected typo "pricipal" -> "principal" in the log message.
    log.warn("Unable to load principal extractor '{}' due to {}", clazz, e.toString());
    if (log.isDebugEnabled()) {
      log.debug("Stacktrace: ", e);
    }
    return new DefaultPrincipalExtractor();
  }
}
/**
 * Creates a function instance via its zero-arg constructor.
 *
 * <p>When no zero-arg constructor exists, either logs an error (if
 * {@code errorOnNoSuchMethod}) or a debug-level skip message. Any other
 * instantiation failure is logged as an error.
 *
 * @param clazz the function class to instantiate
 * @param errorOnNoSuchMethod whether a missing zero-arg constructor is an error
 * @return the new instance, or {@code null} if construction failed
 */
private Function newFunction(final Class<Function> clazz, final boolean errorOnNoSuchMethod) {
  try {
    final Constructor<Function> zeroArgCtor = clazz.getConstructor();
    return zeroArgCtor.newInstance();
  } catch (NoSuchMethodException missingCtor) {
    if (errorOnNoSuchMethod) {
      logger.error("Zero-arg constructor is required, but not found for class: {}",
          clazz.getName(), missingCtor);
    } else if (logger.isDebugEnabled()) {
      logger.debug("Not registering function because it doesn't have a zero-arg constructor: {}",
          clazz.getName());
    }
  } catch (Exception failure) {
    logger.error("Error when attempting constructor for function for class: {}",
        clazz.getName(), failure);
  }
  return null;
}
/**
 * Decrements the live-endpoint counter when an endpoint is retired.
 *
 * @param endpoint the endpoint no longer in use (not inspected here)
 */
@Override
public void endpointNoLongerInUse(Endpoint endpoint) {
  final int remaining = endpointCount.decrementAndGet();
  if (logger.isDebugEnabled()) {
    logger.debug("PdxRegistryRecoveryListener - EndpointNoLongerInUse. Now have {} endpoints",
        remaining);
  }
}
if (logger.isDebugEnabled()) { logger.debug("Adding CacheAndRegionListener .... "); logger.warn(e.getMessage(), e); throw e; } catch (VirtualMachineError err) { logger.error(e.getMessage(), e); throw e;
/**
 * Expiry task body: rolls back the suspended transaction identified by
 * {@code txId}, if it is still present in the suspended-transactions map.
 * Rollback failures are logged at warn level, never propagated.
 */
@Override public void run2() {
  TXManagerImpl mgr = TXManagerImpl.currentInstance;
  // Atomically claim the suspended transaction; null means it was already
  // resumed or removed by another path, so there is nothing to expire.
  TXStateProxy tx = mgr.suspendedTXs.remove(txId);
  if (tx != null) {
    try {
      if (logger.isDebugEnabled()) {
        logger.debug("TX: Expiry task rolling back transaction: {}", txId);
      }
      tx.rollback();
    } catch (GemFireException e) {
      logger.warn(String.format(
          "Exception occurred while rolling back timed out transaction %s", txId), e);
    }
  }
}
}
/**
 * Runs the pool task, funneling all outcomes through the standard handlers:
 * VirtualMachineError is reported to SystemFailure and rethrown, cancellation
 * is logged at debug level, and anything else is logged as an unexpected error.
 * The catch order is significant and preserved.
 */
@Override
public void run() {
  try {
    run2();
  } catch (VirtualMachineError fatal) {
    SystemFailure.initiateFailure(fatal);
    throw fatal;
  } catch (CancelException ignore) {
    // Cancellation is expected during shutdown — debug only.
    if (logger.isDebugEnabled()) {
      logger.debug("Pool task <{}> cancelled", this);
    }
  } catch (Throwable unexpected) {
    logger.error(String.format("Unexpected error in pool task <%s>", this), unexpected);
  }
}
/**
 * Submits a disk-store task to the given executor.
 *
 * <p>During shutdown the executor may reject submissions; that rejection is
 * swallowed (logged at debug level) and {@code null} is returned instead.
 *
 * @param r the disk-store task to run
 * @param executor the executor to submit to
 * @return the task's future, or {@code null} if the submission was rejected
 */
private Future<?> executeDiskStoreTask(DiskStoreTask r, ExecutorService executor) {
  try {
    return executor.submit(r);
  } catch (RejectedExecutionException rejected) {
    if (logger.isDebugEnabled()) {
      logger.debug("Ignored compact schedule during shutdown", rejected);
    }
    return null;
  }
}
log.debug("{} does not exist in cluster metadata", requestAliasOrIndex); continue; if(log.isDebugEnabled()) { log.debug("Aliases for {}: {}", requestAliasOrIndex, aliases); if(log.isDebugEnabled()) { log.debug(alias+" is a filtered alias "+aliasMetaData.getFilter()); if(log.isDebugEnabled()) { log.debug(alias+" is not an alias or does not have a filter"); log.warn("More than one ({}) filtered alias found for same index ({}). This is currently not recommended. Aliases: {}", filteredAliases.size(), requestAliasOrIndex, toString(filteredAliases)); } else if (faMode.equals("disallow")) { log.error("More than one ({}) filtered alias found for same index ({}). This is currently not supported. Aliases: {}", filteredAliases.size(), requestAliasOrIndex, toString(filteredAliases)); return true; } else { if (log.isDebugEnabled()) { log.debug("More than one ({}) filtered alias found for same index ({}). Aliases: {}", filteredAliases.size(), requestAliasOrIndex, toString(filteredAliases));
/**
 * Processes queued filter-profile operation messages in order.
 *
 * <p>Each entry is cast to {@code OperationMessage} and applied; any failure
 * (including a bad element type) is logged at warn level and processing
 * continues with the next message. A {@code null} list is a no-op.
 *
 * @param msgs queued messages to process, may be {@code null}
 */
public void processQueuedFilterProfileMsgs(List msgs) {
  final boolean isDebugEnabled = logger.isDebugEnabled();
  if (msgs == null) {
    return;
  }
  for (Object queued : msgs) {
    try {
      OperationMessage msg = (OperationMessage) queued;
      if (isDebugEnabled) {
        logger.debug("Processing the queued filter profile message :{}", msg);
      }
      msg.processRequest(this);
    } catch (Exception ex) {
      logger.warn("Exception thrown while processing queued profile messages.", ex);
    }
  }
}