/** * Process add/remove/update of an incoming profile. */ public void processIncoming(ClusterDistributionManager dm, String adviseePath, boolean removeProfile, boolean exchangeProfiles, final List<Profile> replyProfiles) { // nothing by default; just log that nothing was done if (logger.isDebugEnabled()) { logger.debug("While processing UpdateAttributes message ignored incoming profile: {}", this); } }
/**
 * Records a successfully loaded config and counts down the completion latch.
 *
 * <p>If the latch has already reached zero this is an unexpected extra
 * callback, which is logged as an error (the result is still stored).
 */
@Override
public void success(String id, Tuple<Long, Settings> settings) {
    final long remaining = latch.getCount();
    if (remaining <= 0) {
        log.error("Latch already counted down (for {} of {}) (index={})",
                id, Arrays.toString(events), searchguardIndex);
    }
    rs.put(id, settings);
    latch.countDown();
    if (log.isDebugEnabled()) {
        // Read the count again: it reflects the state after countDown().
        log.debug("Received config for {} (of {}) with current latch value={}",
                id, Arrays.toString(events), latch.getCount());
    }
}
/**
 * Releases the distributed lock guarding the partitioned-region ID counter.
 *
 * <p>Failures to unlock are logged as warnings rather than propagated, since
 * this runs on cleanup paths where throwing would mask the original problem.
 *
 * @param lockService the lock service currently holding the PRID lock
 */
private static void releasePRIDLock(final DistributedLockService lockService) {
  try {
    lockService.unlock(PartitionedRegionHelper.MAX_PARTITIONED_REGION_ID);
    if (logger.isDebugEnabled()) {
      logger.debug("releasePRIDLock: Released the dlock in allPartitionedRegions for {}",
          PartitionedRegionHelper.MAX_PARTITIONED_REGION_ID);
    }
  } catch (Exception es) {
    // Autoboxing handles the int -> Object conversion; the explicit
    // Integer.valueOf(...) wrapper was redundant.
    logger.warn(String.format("releasePRIDLock: unlocking %s caught an exception",
        PartitionedRegionHelper.MAX_PARTITIONED_REGION_ID), es);
  }
}
/**
 * Wraps a single non-SELECT result object as a row and appends it to the
 * accumulated result list.
 *
 * @param results the raw result object to wrap
 * @param list destination for the generated {@link SelectResultRow}
 */
private void select_NonSelectResults(Object results, List<SelectResultRow> list) {
  final boolean debug = logger.isDebugEnabled();
  if (debug) {
    logger.debug("BeanResults : Bean Results class is {}", results.getClass());
  }
  list.add(createSelectResultRow(results));
}
/**
 * Builds the list of Search Guard REST handlers for this node.
 *
 * <p>No handlers are registered for client/tribe-client nodes or when the
 * plugin is disabled. In SSL-only mode the security/management handlers are
 * skipped and only the superclass handlers are exposed.
 *
 * @return the handlers to register with the REST controller (possibly empty)
 */
@Override
public List<RestHandler> getRestHandlers(Settings settings, RestController restController,
    ClusterSettings clusterSettings, IndexScopedSettings indexScopedSettings,
    SettingsFilter settingsFilter, IndexNameExpressionResolver indexNameExpressionResolver,
    Supplier<DiscoveryNodes> nodesInCluster) {
  // Diamond operator; the previous explicit capacity of 1 was misleading
  // because several handlers are normally added below.
  final List<RestHandler> handlers = new ArrayList<>();
  if (!client && !tribeNodeClient && !disabled) {
    handlers.addAll(super.getRestHandlers(settings, restController, clusterSettings,
        indexScopedSettings, settingsFilter, indexNameExpressionResolver, nodesInCluster));
    if (!sslOnly) {
      handlers.add(new SearchGuardInfoAction(settings, restController,
          Objects.requireNonNull(evaluator), Objects.requireNonNull(threadPool)));
      handlers.add(new KibanaInfoAction(settings, restController,
          Objects.requireNonNull(evaluator), Objects.requireNonNull(threadPool)));
      handlers.add(new SearchGuardLicenseAction(settings, restController));
      handlers.add(new SearchGuardHealthAction(settings, restController,
          Objects.requireNonNull(backendRegistry)));
      handlers.add(new TenantInfoAction(settings, restController,
          Objects.requireNonNull(evaluator), Objects.requireNonNull(threadPool),
          Objects.requireNonNull(cs), Objects.requireNonNull(adminDns)));
      // Management REST API handlers are loaded reflectively because they live
      // in an optional enterprise module.
      Collection<RestHandler> apiHandler = ReflectionHelper.instantiateMngtRestApiHandler(
          settings, configPath, restController, localClient, adminDns, cr, cs,
          Objects.requireNonNull(principalExtractor), evaluator, threadPool,
          Objects.requireNonNull(auditLog));
      handlers.addAll(apiHandler);
      log.debug("Added {} management rest handler(s)", apiHandler.size());
    }
  }
  return handlers;
}
/**
 * Deletes the reporting files for the selected pins of a device.
 *
 * <p>Files are matched by filename prefix derived from dash/device/pin and
 * removed quietly (individual deletion failures are ignored).
 *
 * @return the number of files that matched and were deleted
 * @throws IOException if the user's reporting folder cannot be listed
 */
public int delete(User user, int dashId, int deviceId, String[] pins) throws IOException {
    log.debug("Removing selected pin data for dashId {}, deviceId {}.", dashId, deviceId);
    Path userReportingPath = getUserReportingFolderPath(user);

    // Pre-compute one filename prefix per requested pin.
    List<String> prefixes = new ArrayList<>(pins.length);
    for (String pin : pins) {
        prefixes.add(generateFilenamePrefix(dashId, deviceId, pin));
    }

    int removed = 0;
    try (DirectoryStream<Path> userReportingFolder =
            Files.newDirectoryStream(userReportingPath, "*")) {
        for (Path reportingFile : userReportingFolder) {
            String fileName = reportingFile.getFileName().toString();
            if (!containsPrefix(prefixes, fileName)) {
                continue;
            }
            FileUtils.deleteQuietly(reportingFile);
            removed++;
        }
    }
    return removed;
}
private void recordViewRequest(DistributionMessage request) { try { synchronized (viewRequests) { if (request instanceof JoinRequestMessage) { if (isCoordinator && !services.getConfig().getDistributionConfig().getSecurityUDPDHAlgo().isEmpty()) { services.getMessenger().initClusterKey(); JoinRequestMessage jreq = (JoinRequestMessage) request; // this will inform about cluster-secret key, as we have authenticated at this point JoinResponseMessage response = new JoinResponseMessage(jreq.getSender(), services.getMessenger().getClusterSecretKey(), jreq.getRequestId()); services.getMessenger().send(response); } } logger.debug("Recording the request to be processed in the next membership view"); viewRequests.add(request); viewRequests.notifyAll(); } } catch (RuntimeException | Error t) { logger.warn("unable to record a membership view request due to this exception", t); throw t; } }
/**
 * Demo entry point: emits one message at every log level (plus one with an
 * attached exception) against a Logback-style highlight configuration, to
 * visually verify console ANSI highlighting. The LoggerContext is closed via
 * try-with-resources when the messages have been written.
 */
public static void main(final String[] args) {
    try (final LoggerContext ctx = Configurator.initialize(ConsoleAppenderAnsiMessagesMain.class.getName(),
            "target/test-classes/log4j2-console-highlight-logback.xml")) {
        LOG.fatal("Fatal message.");
        LOG.error("Error message.");
        LOG.warn("Warning message.");
        LOG.info("Information message.");
        LOG.debug("Debug message.");
        LOG.trace("Trace message.");
        // Exercise the exception-rendering path as well.
        LOG.error("Error message.", new IOException("test"));
    }
}
// NOTE(review): this fragment appears truncated/garbled — the debug-guard `if`
// body is never closed, a `catch (InterruptedException e)` has no visible
// matching `try`, and `e` is referenced on the error-log line before any catch
// declares it. Recover the original method from version control before editing;
// do not attempt to compile this as-is.
@Override public void close(boolean keepAlive) { if (logger.isDebugEnabled()) { logger.debug("Shutting down connection manager with keepAlive {}", keepAlive); if (!this.loadConditioningProcessor.awaitTermination(PoolImpl.SHUTDOWN_TIMEOUT, TimeUnit.MILLISECONDS)) { logger.warn("Timeout waiting for load conditioning tasks to complete"); logger.error("Error stopping loadConditioningProcessor", e); } catch (InterruptedException e) { logger.error( "Interrupted stopping loadConditioningProcessor", e);
/**
 * Runs the eventor rules and webhook forwarding for a single pin update.
 *
 * <p>Quota and validation problems are expected per-user conditions and are
 * logged at debug level only; anything else is an unexpected failure and is
 * logged with its full stack trace. No exception escapes this method.
 */
protected void processEventorAndWebhook(User user, DashBoard dash, int deviceId,
                                        Session session, short pin, PinType pinType,
                                        String value, long now) {
    try {
        eventorProcessor.process(user, session, dash, deviceId, pin, pinType, value, now);
        webhookProcessor.process(session, dash, deviceId, pin, pinType, value, now);
    } catch (QuotaLimitException quotaError) {
        log.debug("User {} reached notification limit for eventor/webhook.", user.name);
    } catch (IllegalArgumentException badInput) {
        log.debug("Error processing webhook for {}. Reason : {}", user.email, badInput.getMessage());
    } catch (Exception unexpected) {
        log.error("Error processing eventor/webhook.", unexpected);
    }
}
/**
 * Asynchronously deletes the reporting data for specific pins of one device,
 * replying OK on success or ILLEGAL_COMMAND on any failure.
 *
 * <p>The disk work runs on the blocking-IO history executor so the network
 * event loop is never blocked.
 */
private static void delete(Holder holder, Channel channel, int msgId, User user,
                           DashBoard dash, int deviceId, String[] pins) {
    holder.blockingIOProcessor.executeHistory(() -> {
        try {
            int removedCounter =
                    holder.reportingDiskDao.delete(user, dash.id, deviceId, pins);
            log.debug("Removed {} files for dashId {} and deviceId {}",
                    removedCounter, dash.id, deviceId);
            channel.writeAndFlush(ok(msgId), channel.voidPromise());
        } catch (Exception e) {
            log.warn("Error removing device data. Reason : {}.", e.getMessage());
            channel.writeAndFlush(illegalCommand(msgId), channel.voidPromise());
        }
    });
}
/**
 * Creates one serial event processor per configured dispatcher thread.
 *
 * <p>Each processor is named {@code id.<index>} so logs and monitoring can
 * distinguish the dispatcher threads.
 */
@Override
protected void initializeMessageQueue(String id) {
  final int dispatcherCount = sender.getDispatcherThreads();
  final boolean debug = logger.isDebugEnabled();
  for (int index = 0; index < dispatcherCount; index++) {
    SerialGatewaySenderEventProcessor processor = new SerialGatewaySenderEventProcessor(
        this.sender, id + "." + index, getThreadMonitorObj());
    processors.add(processor);
    if (debug) {
      logger.debug("Created the SerialGatewayEventProcessor_{}->{}", index,
          processors.get(index));
    }
  }
}
/**
 * Collects the {@link PageEntry} for every key in the function filter from the
 * local data of the context's region and sends them back as the last result.
 *
 * <p>Cache-closed and primary-moved conditions are logged at debug level and
 * rethrown wrapped so the caller can retry the function invocation.
 */
@Override
public void execute(FunctionContext context) {
  try {
    RegionFunctionContext regionContext = (RegionFunctionContext) context;
    // NOTE(review): raw Region type kept as-is; getEntry's signature is not
    // visible here, so parameterizing could break the call.
    Region region = PartitionRegionHelper.getLocalDataForContext(regionContext);
    Set<?> filterKeys = regionContext.getFilter();
    List<PageEntry> pageEntries = new PageResults(filterKeys.size());
    for (Object key : filterKeys) {
      PageEntry pageEntry = getEntry(region, key);
      if (pageEntry == null) {
        continue; // key absent locally — skip it
      }
      pageEntries.add(pageEntry);
    }
    regionContext.getResultSender().lastResult(pageEntries);
  } catch (CacheClosedException | PrimaryBucketException e) {
    logger.debug("Exception during lucene query function", e);
    throw new InternalFunctionInvocationTargetException(e);
  }
}
/**
 * Demo entry point: writes messages at every log level whose text carries
 * raw ANSI escape sequences, to verify that Jansi passes them through to the
 * console. Jansi must be enabled explicitly via {@code log4j.skipJansi=false}
 * (see LOG4J2-2087). The LoggerContext is closed by try-with-resources.
 */
public static void main(final String[] args) {
    System.setProperty("log4j.skipJansi", "false"); // LOG4J2-2087: explicitly enable
    try (final LoggerContext ctx = Configurator.initialize(ConsoleAppenderAnsiMessagesMain.class.getName(),
            "target/test-classes/log4j2-console.xml")) {
        LOG.fatal("\u001b[1;35mFatal message.\u001b[0m");
        LOG.error("\u001b[1;31mError message.\u001b[0m");
        LOG.warn("\u001b[0;33mWarning message.\u001b[0m");
        LOG.info("\u001b[0;32mInformation message.\u001b[0m");
        LOG.debug("\u001b[0;36mDebug message.\u001b[0m");
        LOG.trace("\u001b[0;30mTrace message.\u001b[0m");
        // Exercise the exception-rendering path as well.
        LOG.error("\u001b[1;31mError message.\u001b[0m", new IOException("test"));
    }
}
/**
 * Reflectively instantiates an {@link InterClusterRequestEvaluator} from its
 * class name using the single-argument {@code (Settings)} constructor.
 *
 * <p>On any failure (class missing, wrong constructor, instantiation error)
 * a warning is logged and the {@link DefaultInterClusterRequestEvaluator} is
 * returned instead, so callers always get a usable evaluator.
 */
public static InterClusterRequestEvaluator instantiateInterClusterRequestEvaluator(
        final String clazz, final Settings settings) {
    try {
        final Class<?> evaluatorClass = Class.forName(clazz);
        final InterClusterRequestEvaluator evaluator = (InterClusterRequestEvaluator)
                evaluatorClass.getConstructor(Settings.class).newInstance(settings);
        addLoadedModule(evaluatorClass);
        return evaluator;
    } catch (final Throwable e) {
        log.warn("Unable to load inter cluster request evaluator '{}' due to {}",
                clazz, e.toString());
        if (log.isDebugEnabled()) {
            log.debug("Stacktrace: ", e);
        }
        return new DefaultInterClusterRequestEvaluator(settings);
    }
}
/**
 * Stores a successfully retrieved config for {@code type} and counts down the
 * completion latch.
 *
 * <p>A callback arriving after the latch already hit zero is unexpected and
 * logged as an error, but the result is still recorded.
 */
@Override
public void success(String type, Tuple<Long, Settings> settings) {
    final long remaining = latch.getCount();
    if (remaining <= 0) {
        log.error("Latch already counted down (for {} of {}) (index={})",
                type, Arrays.toString(events), searchguardIndex);
    }
    rs.put(type, settings);
    latch.countDown();
    if (log.isDebugEnabled()) {
        // Re-read the count: it reflects the state after countDown().
        log.debug("Received config for {} (of {}) with current latch value={}",
                type, Arrays.toString(events), latch.getCount());
    }
}
/**
 * Disconnects this agent: delegates to the superclass, persists the current
 * StatAlert definitions, and removes the cache/region lifecycle listener.
 *
 * <p>Error handling follows the GemFire/Geode SystemFailure protocol: a
 * VirtualMachineError is escalated via SystemFailure.initiateFailure and
 * rethrown; any other Error first checks SystemFailure.checkFailure() to
 * detect a cascading JVM failure before being logged and rethrown. The catch
 * order (RuntimeException → VirtualMachineError → Error) is significant and
 * must not be rearranged.
 */
@Override public void disconnect() { try { super.disconnect(); // Save existing StatAlert Definitions saveAlertDefinitionsAsSerializedObjects(); /* Remove Cache Listener to listen to Cache & Region create/destroy events */ if (logger.isDebugEnabled()) { logger.debug("Removing CacheAndRegionListener .... "); } removeCacheListener(cacheRegionListener); } catch (RuntimeException e) { logger.warn(e.getMessage(), e); throw e; } catch (VirtualMachineError err) { SystemFailure.initiateFailure(err); // If this ever returns, re-throw the error. We're poisoned // now, so don't let this thread continue. throw err; } catch (Error e) { // Whenever you catch Error or Throwable, you must also // catch VirtualMachineError (see above). However, there is // _still_ a possibility that you are dealing with a cascading // error condition, so you also need to check to see if the JVM // is still usable: SystemFailure.checkFailure(); logger.error(e.getMessage(), e); throw e; } }
/**
 * Creates the wrapping appender.
 *
 * @param name appender name passed to the superclass
 * @param node the configuration node describing the wrapped appender
 * @param config the owning log4j configuration
 * @param renameOnClose whether the underlying file is renamed when closed
 * @param renamedFileSuffix suffix appended to the file name on rename
 */
public LlapWrappedAppender(final String name, final Node node, final Configuration config,
    boolean renameOnClose, String renamedFileSuffix) {
  super(name, null, null);
  this.node = node;
  this.config = config;
  this.renameFileOnClose = renameOnClose;
  this.renamedFileSuffix = renamedFileSuffix;
  if (LOGGER.isDebugEnabled()) {
    // Guarded because the message is built by string concatenation.
    String created = LlapWrappedAppender.class.getName() + " created with name=" + name
        + ", renameOnClose=" + renameOnClose + ", renamedFileSuffix=" + renamedFileSuffix;
    LOGGER.debug(created);
  }
}
/**
 * Produces a deep copy of a dashboard by serializing it to a Jackson
 * {@link TokenBuffer} and reading it back.
 *
 * @param dash the dashboard to clone; may be {@code null}
 * @return an independent copy, or {@code null} if {@code dash} is null or
 *         the round-trip fails (the failure is logged, never thrown)
 */
public static DashBoard deepCopy(DashBoard dash) {
    if (dash == null) {
        return null;
    }
    try {
        // In-memory token round-trip avoids building an intermediate string.
        TokenBuffer buffer = new TokenBuffer(JsonParser.MAPPER, false);
        JsonParser.MAPPER.writeValue(buffer, dash);
        return JsonParser.MAPPER.readValue(buffer.asParser(), DashBoard.class);
    } catch (Exception copyError) {
        log.error("Error during deep copy of dashboard. Reason : {}", copyError.getMessage());
        log.debug(copyError);
        return null;
    }
}
/**
 * Asynchronously deletes all reporting data for each of the given devices,
 * replying OK once every device is processed, or ILLEGAL_COMMAND if any
 * deletion fails.
 *
 * <p>The disk work runs on the blocking-IO history executor so the network
 * event loop is never blocked.
 */
private static void delete(Holder holder, Channel channel, int msgId, User user,
                           DashBoard dash, int... deviceIds) {
    holder.blockingIOProcessor.executeHistory(() -> {
        try {
            for (int deviceId : deviceIds) {
                int removedCounter = holder.reportingDiskDao.delete(user, dash.id, deviceId);
                log.debug("Removed {} files for dashId {} and deviceId {}",
                        removedCounter, dash.id, deviceId);
            }
            channel.writeAndFlush(ok(msgId), channel.voidPromise());
        } catch (Exception e) {
            log.warn("Error removing device data. Reason : {}.", e.getMessage());
            channel.writeAndFlush(illegalCommand(msgId), channel.voidPromise());
        }
    });
}