/**
 * Logs the given message at debug level.
 *
 * Plain {@code String} messages are cheap to render, so they are passed straight
 * through and the underlying logger applies its own level filtering. For any other
 * object the level is checked first, so a potentially expensive {@code toString()}
 * (via {@code String.valueOf}) only runs when debug logging is actually enabled.
 */
public void debug(Object message) {
    boolean cheapToRender = message instanceof String;
    if (cheapToRender || this.logger.isDebugEnabled()) {
        this.logger.debug(String.valueOf(message));
    }
}
/**
 * Copies the given text onto the system clipboard.
 *
 * Any failure (e.g. headless environment, clipboard unavailable) is logged
 * and swallowed; this is a best-effort operation.
 *
 * @param text the string to place on the clipboard
 */
public static void setClipboardString(String text) {
    try {
        StringSelection selection = new StringSelection(text);
        Toolkit.getDefaultToolkit().getSystemClipboard().setContents(selection, null);
        LOG.debug("String '{}' copied to clipboard", text);
    } catch (Exception e) {
        LOG.error("Failed copy string '{}' to clipboard", text, e);
    }
}
}
/**
 * Logs a warning with the number of unresolved class references, and — when
 * debug logging is enabled — the full list of missing class names in sorted order.
 * Does nothing if no classes are missing.
 */
public void printMissingClasses() {
    int missingCount = missingClasses.size();
    if (missingCount == 0) {
        return;
    }
    LOG.warn("Found {} references to unknown classes", missingCount);
    if (LOG.isDebugEnabled()) {
        // Copy and sort only when the debug output will actually be emitted.
        List<String> sortedNames = new ArrayList<>(missingClasses);
        Collections.sort(sortedNames);
        sortedNames.forEach(name -> LOG.debug("  {}", name));
    }
}
}
/**
 * Deletes the given row keys from the bulk-load tracking table.
 *
 * @param rows row keys of the original bulk-loaded entries to remove
 * @throws IOException if the table cannot be opened or the batched delete fails
 */
public void deleteBulkLoadedRows(List<byte[]> rows) throws IOException {
    try (Table table = connection.getTable(bulkLoadTableName)) {
        // Presize: one Delete per row.
        List<Delete> lstDels = new ArrayList<>(rows.size());
        for (byte[] row : rows) {
            lstDels.add(new Delete(row));
            // Guard the debug line so the Bytes.toString() conversion and message
            // assembly only happen when debug logging is actually enabled.
            if (LOG.isDebugEnabled()) {
                LOG.debug("orig deleting the row: {}", Bytes.toString(row));
            }
        }
        table.delete(lstDels);
        LOG.debug("deleted {} original bulkload rows", rows.size());
    }
}
private void add(HttpMethod method, String url, String acceptedType, Object target) { RouteEntry entry = new RouteEntry(); entry.httpMethod = method; entry.path = url; entry.target = target; entry.acceptedType = acceptedType; LOG.debug("Adds route: " + entry); // Adds to end of list routes.add(entry); }
/**
 * Moves entries that no longer exist in the filesystem from the active list to
 * the archive list. Both lists are mutated in place.
 *
 * @param activeFiles  paths currently considered active; missing ones are removed
 * @param archiveFiles receives the paths that were found to be archived
 * @throws IOException if the existence check against the filesystem fails
 */
private void updateFileLists(List<String> activeFiles, List<String> archiveFiles) throws IOException {
    List<String> newlyArchived = new ArrayList<>();
    for (String spath : activeFiles) {
        if (!fs.exists(new Path(spath))) {
            newlyArchived.add(spath);
        }
    }
    if (!newlyArchived.isEmpty()) {
        activeFiles.removeAll(newlyArchived);
        archiveFiles.addAll(newlyArchived);
    }
    // Parameterized logging avoids string concatenation when debug is disabled.
    LOG.debug("{} files have been archived.", newlyArchived.size());
}
/**
 * Sends the given heartbeat message to every configured pacemaker server.
 *
 * Servers that fail with a connection error are skipped (after triggering a
 * reconnect) rather than aborting the whole broadcast.
 *
 * @param m message to broadcast
 * @return one response per server that could be reached (never empty)
 * @throws PacemakerConnectionException if no server could be reached at all
 * @throws InterruptedException if interrupted while sending
 */
public List<HBMessage> sendAll(HBMessage m) throws PacemakerConnectionException, InterruptedException {
    // Diamond operator instead of the redundant explicit type argument.
    List<HBMessage> responses = new ArrayList<>();
    LOG.debug("Using servers: {}", servers);
    for (String s : servers) {
        try {
            responses.add(getClientForServer(s).send(m));
        } catch (PacemakerConnectionException e) {
            LOG.warn("Failed to connect to the pacemaker server {}, attempting to reconnect", s);
            getClientForServer(s).reconnect();
        }
    }
    if (responses.isEmpty()) {
        throw new PacemakerConnectionException("Failed to connect to any Pacemaker.");
    }
    return responses;
}
/**
 * Bridges Kryo's minlog output into this class's logger, mapping each minlog
 * level to the corresponding logger method. Unknown levels are dropped, matching
 * the original switch with no default.
 */
@Override
public void log(int level, String category, String message, Throwable ex) {
    final String logString = "[KRYO " + category + "] " + message;
    if (level == Log.LEVEL_ERROR) {
        log.error(logString, ex);
    } else if (level == Log.LEVEL_WARN) {
        log.warn(logString, ex);
    } else if (level == Log.LEVEL_INFO) {
        log.info(logString, ex);
    } else if (level == Log.LEVEL_DEBUG) {
        log.debug(logString, ex);
    } else if (level == Log.LEVEL_TRACE) {
        log.trace(logString, ex);
    }
}
}
protected void logException() { if (exception instanceof JobNotFoundException || exception instanceof ActivitiTaskAlreadyClaimedException) { // reduce log level, because this may have been caused because of job deletion due to cancelActiviti="true" log.info("Error while closing command context", exception); } else if (exception instanceof ActivitiOptimisticLockingException) { // reduce log level, as normally we're not interested in logging this exception log.debug("Optimistic locking exception : " + exception); } else { log.error("Error while closing command context", exception); } }
/**
 * Resolves the Eureka cluster endpoints from static client configuration.
 *
 * Builds the zone-to-URL map for this instance's zone preference, converts each
 * URL into an {@code AwsEndpoint}, and logs (but skips) any URL that fails to parse.
 *
 * @return the resolved endpoints; empty (with an error logged) if none resolve
 */
private List<AwsEndpoint> getClusterEndpointsFromConfig() {
    String[] availZones = clientConfig.getAvailabilityZones(clientConfig.getRegion());
    String myZone = InstanceInfo.getZone(availZones, myInstanceInfo);
    Map<String, List<String>> serviceUrls = EndpointUtils
            .getServiceUrlsMapFromConfig(clientConfig, myZone, clientConfig.shouldPreferSameZoneEureka());

    List<AwsEndpoint> endpoints = new ArrayList<>();
    // Iterate entries directly instead of keySet() + get() to avoid a second map lookup per zone.
    for (Map.Entry<String, List<String>> zoneUrls : serviceUrls.entrySet()) {
        for (String url : zoneUrls.getValue()) {
            try {
                endpoints.add(new AwsEndpoint(url, getRegion(), zoneUrls.getKey()));
            } catch (Exception e) {
                // Attach the cause so the reason the URI was rejected is not lost.
                logger.warn("Invalid eureka server URI: {}; removing from the server pool", url, e);
            }
        }
    }
    logger.debug("Config resolved to {}", endpoints);
    if (endpoints.isEmpty()) {
        logger.error("Cannot resolve to any endpoints from provided configuration: {}", serviceUrls);
    }
    return endpoints;
}
/**
 * Replies to a heartbeat PING with a PONG on the same channel.
 *
 * Send failures are logged and swallowed so a single bad channel cannot take
 * down the handler.
 */
@Override
public void onCheckMessage(long msgId, ChannelHandlerContext ctx, ServerMessageSender sender) {
    try {
        sender.sendResponse(msgId, ctx.channel(), HeartbeatMessage.PONG);
    } catch (Throwable throwable) {
        // Bug fix: was LOGGER.error("", "send response error", throwable) — the empty
        // format string meant the "send response error" description was never printed.
        LOGGER.error("send response error", throwable);
    }
    if (LOGGER.isDebugEnabled()) {
        // Parameterized form instead of string concatenation.
        LOGGER.debug("received PING from {}", ctx.channel().remoteAddress());
    }
}
/**
 * Logs the resource map (root resources plus their discovered child resources)
 * for the given model, then returns the model unchanged.
 *
 * @param resourceModel the model whose resources are logged
 * @param configuration unused; required by the interface
 * @return the same {@code resourceModel} instance, unmodified
 */
@Override
public ResourceModel processResourceModel(ResourceModel resourceModel, Configuration configuration) {
    // Parameterized logging defers resourceModel.toString() until debug is enabled.
    LOG.debug("Map for resource model <{}>:", resourceModel);
    final List<Resource> resources = new ArrayList<>();
    for (Resource resource : resourceModel.getResources()) {
        resources.add(resource);
        resources.addAll(findChildResources(resource));
    }
    logResources(resources);
    return resourceModel;
}
@Override public void initializeState(StateInitializationContext context) throws Exception { super.initializeState(context); checkState(checkpointedState == null, "The reader state has already been initialized."); checkpointedState = context.getOperatorStateStore().getSerializableListState("splits"); int subtaskIdx = getRuntimeContext().getIndexOfThisSubtask(); if (context.isRestored()) { LOG.info("Restoring state for the {} (taskIdx={}).", getClass().getSimpleName(), subtaskIdx); // this may not be null in case we migrate from a previous Flink version. if (restoredReaderState == null) { restoredReaderState = new ArrayList<>(); for (TimestampedFileInputSplit split : checkpointedState.get()) { restoredReaderState.add(split); } if (LOG.isDebugEnabled()) { LOG.debug("{} (taskIdx={}) restored {}.", getClass().getSimpleName(), subtaskIdx, restoredReaderState); } } } else { LOG.info("No state to restore for the {} (taskIdx={}).", getClass().getSimpleName(), subtaskIdx); } }
/**
 * Logs the exception with its full stack trace at debug level; when debug
 * logging is disabled, falls back to a compact info line containing only the
 * exception's message.
 */
static void logException(String msg, Exception e) {
    if (LOG.isDebugEnabled()) {
        LOG.debug(msg, e);
        return;
    }
    LOG.info(msg + ": " + e.getMessage());
}
/**
 * Checks GitHub for a jadx release newer than the running version.
 *
 * Development builds skip the check entirely. Pre-releases and the current
 * version are filtered out before comparing against the latest remaining release.
 *
 * @return the newer release, or {@code null} when up to date / nothing to report
 * @throws IOException if fetching the release list fails
 */
private static Release checkForNewRelease() throws IOException {
    String version = JadxDecompiler.getVersion();
    if (version.contains("dev")) {
        LOG.debug("Ignore check for update: development version");
        return null;
    }
    List<Release> releases = get(GITHUB_RELEASES_URL, RELEASES_LIST_TYPE);
    if (releases == null) {
        return null;
    }
    releases.removeIf(rel -> rel.getName().equalsIgnoreCase(version) || rel.isPreRelease());
    if (releases.isEmpty()) {
        return null;
    }
    releases.sort(RELEASE_COMPARATOR);
    // After sorting, the newest candidate is the last element.
    Release latest = releases.get(releases.size() - 1);
    if (VersionComparator.checkAndCompare(version, latest.getName()) >= 0) {
        return null;
    }
    LOG.info("Found new jadx version: {}", latest);
    return latest;
}
/**
 * Shuts down this ZooKeeper server instance.
 *
 * A no-op when the server is not running. Exceptions from the parent shutdown
 * and from the sync processor shutdown are each caught and logged independently,
 * so a failure in one step never prevents the other from running.
 */
@Override
public synchronized void shutdown() {
    if (!canShutdown()) {
        LOG.debug("ZooKeeper server is not running, so not proceeding to shutdown!");
        return;
    }
    LOG.info("Shutting down");

    // Parent shutdown first; best-effort, failures are logged and ignored.
    try {
        super.shutdown();
    } catch (Exception ex) {
        LOG.warn("Ignoring unexpected exception during shutdown", ex);
    }

    // Then stop the sync processor, if one was ever created.
    try {
        if (syncProcessor != null) {
            syncProcessor.shutdown();
        }
    } catch (Exception ex) {
        LOG.warn("Ignoring unexpected exception in syncprocessor shutdown", ex);
    }
}
}
/**
 * Receives a single message and dispatches it to the listener.
 *
 * @return {@code true} when the consumer has been closed (receive returned null)
 *         and the run loop should stop; {@code false} to keep consuming.
 *         Listener errors are logged but do not stop the loop.
 */
protected boolean runImpl() {
    try {
        Message received = consumer.receive();
        if (received == null) {
            LOG.debug("Message consumer was closed.");
            return true;
        }
        // Cast before capturing stats so a ClassCastException leaves stats untouched,
        // matching the original statement order.
        ObjectMessage objectMessage = (ObjectMessage) received;
        daemonThreadStatsCollector.captureStats(thread.getId());
        listener.onMessage((GoMessage) objectMessage.getObject());
    } catch (JMSException jmsException) {
        LOG.warn("Error receiving message. Message receiving will continue despite this error.", jmsException);
    } catch (Exception ex) {
        LOG.error("Exception thrown in message handling by listener {}", listener, ex);
    } finally {
        daemonThreadStatsCollector.clearStats(thread.getId());
    }
    return false;
}
/**
 * Decides whether the I/O thread should exit.
 *
 * @param now                   current time in ms
 * @param curHardShutdownTimeMs deadline after which a hard shutdown is forced
 * @return {@code true} when all work is done or the hard-shutdown deadline has
 *         passed; {@code false} to keep running.
 */
private boolean threadShouldExit(long now, long curHardShutdownTimeMs) {
    // Clean exit: nothing left in flight.
    if (!hasActiveExternalCalls()) {
        log.trace("All work has been completed, and the I/O thread is now exiting.");
        return true;
    }
    // Deadline reached: abort whatever is still in progress.
    if (now >= curHardShutdownTimeMs) {
        log.info("Forcing a hard I/O thread shutdown. Requests in progress will be aborted.");
        return true;
    }
    long remainingMs = curHardShutdownTimeMs - now;
    log.debug("Hard shutdown in {} ms.", remainingMs);
    return false;
}
/**
 * Callback for an asynchronous auto-commit: logs the outcome and, for retriable
 * failures, resets the auto-commit timer to the retry backoff so the commit is
 * attempted again.
 */
@Override
public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) {
    if (exception == null) {
        log.debug("Completed asynchronous auto-commit of offsets {}", offsets);
        return;
    }
    if (exception instanceof RetriableException) {
        log.debug("Asynchronous auto-commit of offsets {} failed due to retriable error: {}", offsets, exception);
        nextAutoCommitTimer.updateAndReset(retryBackoffMs);
    } else {
        log.warn("Asynchronous auto-commit of offsets {} failed: {}", offsets, exception.getMessage());
    }
}
});
/**
 * Rebalances this member's cache via {@code RegionHelper}, logging the result
 * details at debug level. Any failure is caught and logged as a warning rather
 * than propagated.
 */
@Override
protected void rebalanceCache() {
    try {
        getLogger().info("Rebalancing: " + this.cache);
        RebalanceResults rebalanceResults = RegionHelper.rebalanceCache(this.cache);
        // Guard the detailed output so the results message is only built when needed.
        if (getLogger().isDebugEnabled()) {
            getLogger().debug("Done rebalancing: " + this.cache);
            getLogger().debug(RegionHelper.getRebalanceResultsMessage(rebalanceResults));
        }
    } catch (Exception ex) {
        getLogger().warn("Rebalance failed because of the following exception:", ex);
    }
}