/**
 * Logs a single severe summary line with the total number of entries, across
 * all partitions, that could not be flushed by the map store.
 *
 * @param failsPerPartition failed entries, keyed by partition id
 */
private void printErrorLog(Map<Integer, List<DelayedEntry>> failsPerPartition) {
    // Sum the failures over every partition before emitting one summary message.
    int failedEntryCount = 0;
    for (List<DelayedEntry> partitionFailures : failsPerPartition.values()) {
        failedEntryCount += partitionFailures.size();
    }
    logger.severe(String.format("Map store flush operation can not be done for %d entries", failedEntryCount));
}
@Override
public void onFailure(Throwable t) {
    // Snapshot write failed: log it, remember the FIRST failure only
    // (compareAndSet leaves an already-recorded error in place), and release
    // the flush / concurrent-async-op slots this operation was holding.
    logger.severe("Error writing to snapshot map", t);
    firstError.compareAndSet(null, t);
    numActiveFlushes.decrementAndGet();
    numConcurrentAsyncOps.decrementAndGet();
}
}; // closes the anonymous callback instance; its declaration starts before this view
/**
 * Releases the given file lock and closes its channel, then schedules the
 * lock file for deletion on JVM exit.
 *
 * <p>The release and the close are deliberately independent: in the original
 * single try-block, a throwing {@code lock.release()} skipped
 * {@code channel.close()} and leaked the channel. Here the channel is always
 * closed (best effort) regardless of how the release fared.
 *
 * @param lock    the lock to release
 * @param channel the channel the lock was obtained from
 */
void releaseInternal(FileLock lock, FileChannel channel) {
    try {
        lock.release();
    } catch (IOException e) {
        logger.severe("Problem while releasing the lock and closing channel on " + lockFile, e);
    } finally {
        try {
            channel.close();
        } catch (IOException e) {
            logger.severe("Problem while releasing the lock and closing channel on " + lockFile, e);
        }
        // Best-effort cleanup of the on-disk lock file when the JVM terminates.
        lockFile.deleteOnExit();
    }
}
public void run() {
    // Ping every other cluster member; NON_LOCAL_MEMBER_SELECTOR excludes this member.
    Collection<Member> members = clusterService.getMembers(MemberSelectors.NON_LOCAL_MEMBER_SELECTOR);
    for (Member member : members) {
        try {
            runPingTask(member);
        } catch (Throwable e) {
            // A failure pinging one member must not stop the pings to the remaining members.
            logger.severe(e);
        }
    }
}
// Trailing arguments of the periodic scheduling call whose opening is outside this view.
}, icmpIntervalMillis, icmpIntervalMillis, TimeUnit.MILLISECONDS);
protected final void notifyCloseListeners() { for (ChannelCloseListener closeListener : closeListeners) { // it is important we catch exceptions so that other listeners aren't obstructed when // one of the listeners is throwing an exception try { closeListener.onClose(AbstractChannel.this); } catch (Exception e) { logger.severe(format("Failed to process closeListener [%s] on channel [%s]", closeListener, this), e); } } } }
/**
 * Logs that a job completion signal was ignored because the job is in a state
 * that no longer accepts completion.
 *
 * @param failure the failure that accompanied the completion, or {@code null}
 *                for a successful completion
 * @param status  the current job status that caused the signal to be ignored
 */
private void logIgnoredCompletion(@Nullable Throwable failure, JobStatus status) {
    // Both branches share the same tail; only the prefix and throwable differ.
    String suffix = idToString(jobId) + " because status is " + status;
    if (failure == null) {
        logger.severe("Ignoring completion of " + suffix);
    } else {
        logger.severe("Ignoring failure completion of " + suffix, failure);
    }
}
/**
 * Handles a single client response message; any failure is logged (with the
 * processing thread's name) rather than propagated.
 *
 * @param response the response message to handle
 */
private void process(ClientMessage response) {
    try {
        handleResponse(response);
    } catch (Exception e) {
        String threadName = Thread.currentThread().getName();
        logger.severe("Failed to process response: " + response + " on responseThread: " + threadName, e);
    }
}
/**
 * Records a transactional add-reservation on this backup member.
 * A pre-existing mapping for the same item id indicates a duplicate
 * reservation and is reported as severe.
 *
 * @param itemId        id of the reserved item
 * @param transactionId owning transaction's id
 */
public void reserveAddBackup(long itemId, String transactionId) {
    TxCollectionItem reservation = new TxCollectionItem(itemId, null, transactionId, false);
    Object previous = txMap.put(itemId, reservation);
    if (previous != null) {
        // Duplicate reservation: the slot was already taken on this backup.
        logger.severe("Transaction reservation item already exists on the backup member." + " Reservation item ID: " + itemId);
    }
}
@Override
public void logError(Throwable e) {
    // Anything other than a TransactionException gets the default treatment.
    if (!(e instanceof TransactionException)) {
        super.logError(e);
        return;
    }
    // Transactional errors are expected flow control: message only, no stack trace.
    getLogger().severe(e.getMessage());
}
@Override
public void logError(Throwable e) {
    boolean transactional = e instanceof TransactionException;
    if (transactional) {
        // Expected transactional failure: the message alone is enough, skip the stack trace.
        getLogger().severe(e.getMessage());
    } else {
        super.logError(e);
    }
}
@Override
public void run() {
    try {
        // Shut the client down; log any failure so the shutdown thread never dies silently.
        clientContext.getLifecycleService().shutdown();
    } catch (Exception exception) {
        logger.severe("Exception during client shutdown ", exception);
    }
}
// Trailing arguments of the Thread construction (thread name) whose opening is outside this view.
}, clientContext.getName() + ".clientShutdown-").start();
/**
 * Runs a single queued task, counting it as processed. Any throwable is
 * inspected for OutOfMemoryError first, then logged — never propagated.
 *
 * @param task the task to execute
 */
private void process(Runnable task) {
    processed.inc();
    try {
        task.run();
    } catch (Throwable t) {
        // Give the OOME dispatcher first look before normal error logging.
        OutOfMemoryErrorDispatcher.inspectOutOfMemoryError(t);
        logger.severe(getName() + " caught an exception while processing:" + task, t);
    }
}
/**
 * Main loop: takes response packets off the queue and feeds them to the
 * inbound handler until shutdown is requested. Handler failures are inspected
 * for OutOfMemoryError and logged so the loop keeps running.
 *
 * @throws InterruptedException if the blocking take is interrupted
 */
private void doRun() throws InterruptedException {
    while (true) {
        if (shutdown) {
            return;
        }
        Packet packet = responseQueue.take();
        try {
            inboundResponseHandler.accept(packet);
        } catch (Throwable t) {
            inspectOutOfMemoryError(t);
            logger.severe("Failed to process response: " + packet + " on:" + getName(), t);
        }
    }
}
public void run() {
    // Schedule a ping task on the user executor for every active connection.
    for (final ClientConnection connection : clientConnectionManager.getActiveConnections()) {
        try {
            clientExecutionService.getUserExecutor().execute(new PeriodicPingTask(connection));
        } catch (Throwable e) {
            // A failure submitting one ping must not stop pings for the remaining connections.
            logger.severe(e);
        }
    }
}
// Trailing arguments of the periodic scheduling call whose opening is outside this view.
}, icmpIntervalMillis, icmpIntervalMillis, TimeUnit.MILLISECONDS);
@Override
public CacheMergePolicy createNew(String className) {
    // Instantiate the merge policy through the configured class loader.
    try {
        return newInstance(nodeEngine.getConfigClassLoader(), className);
    } catch (Exception e) {
        nodeEngine.getLogger(getClass()).severe(e);
        // Surface as a configuration error, naming the offending class and keeping the cause.
        throw new InvalidConfigurationException("Invalid cache merge policy: " + className, e);
    }
}
}; // closes the anonymous factory instance; its declaration starts before this view
@Override
public void run() {
    // The master rejected this node's authentication: log under the security
    // category, then terminate this node (true = terminate without graceful handover).
    final NodeEngineImpl engine = (NodeEngineImpl) getNodeEngine();
    final Node localNode = engine.getNode();
    final ILogger securityLogger = engine.getLogger("com.hazelcast.security");
    securityLogger.severe("Node could not join cluster. Authentication failed on master node! Node is going to shutdown now!");
    localNode.shutdown(true);
}
/**
 * Closes this processor: closes the current file, then the directory watcher.
 *
 * <p>The watcher close lives in a nested {@code finally} so it is attempted
 * even when {@code closeCurrentFile()} throws — in the original, a throwing
 * {@code closeCurrentFile()} skipped {@code watcher.close()} while the outer
 * {@code finally} still nulled the field, leaking the watcher with no
 * remaining reference. Failures are logged, never propagated.
 */
@Override
public void close() {
    try {
        try {
            closeCurrentFile();
        } finally {
            getLogger().fine("Closing StreamFilesP");
            watcher.close();
        }
    } catch (IOException e) {
        getLogger().severe("Failed to close StreamFilesP", e);
    } finally {
        // Drop the reference regardless of outcome; the processor is done either way.
        watcher = null;
    }
}
/**
 * If a deserialization failure was recorded, logs it, shuts this node down,
 * and rethrows the failure; otherwise does nothing.
 *
 * @param clusterService used to reach the local node for shutdown
 */
private void checkDeserializationFailure(ClusterServiceImpl clusterService) {
    // Fast path: nothing recorded, nothing to do.
    if (deserializationFailure == null) {
        return;
    }
    getLogger().severe("Node could not join cluster.", deserializationFailure);
    Node node = clusterService.getNodeEngine().getNode();
    node.shutdown(true);
    // Propagate the original failure to the caller as well.
    throw ExceptionUtil.rethrow(deserializationFailure);
}
/**
 * Registers the Near Cache invalidation listener for this replicated map
 * proxy, storing the registration id. A registration failure is logged as
 * severe (the Near Cache stays uninitialized) but not propagated.
 */
private void registerInvalidationListener() {
    try {
        invalidationListenerId = addNearCacheInvalidationListener(new ReplicatedMapAddNearCacheEventHandler());
    } catch (Exception e) {
        // Best effort: the proxy keeps working without Near Cache invalidation.
        ILogger log = getContext().getLoggingService().getLogger(ClientReplicatedMapProxy.class);
        log.severe("-----------------\nNear Cache is not initialized!\n-----------------", e);
    }
}