/**
 * Enqueues an event for processing. Events are handled in the order received.
 *
 * @param event the event to enqueue; must be non-null
 * @throws IllegalStateException if {@code event} is null
 */
public void addEvent(Event event) {
    // NOTE(review): IllegalStateException for a null argument is unconventional —
    // IllegalArgumentException/NullPointerException is the idiomatic choice. Kept
    // as-is to preserve the exception type callers may depend on; confirm before changing.
    if(event == null) {
        throw new IllegalStateException("event must be non-null");
    }
    if(logger.isTraceEnabled()) {
        logger.trace("Adding event " + event);
    }
    eventQueue.add(event);
}
for(SelectionKey sk: selector.keys()) { try { if(logger.isTraceEnabled()) logger.trace("Closing SelectionKey's channel"); logger.warn(e.getMessage(), e); if(logger.isTraceEnabled()) logger.trace("Cancelling SelectionKey"); logger.warn(e.getMessage(), e); logger.warn(e.getMessage(), e);
public void run() { if(logger.isInfoEnabled()) logger.info("Server now listening for connections on port " + port); if(Thread.currentThread().isInterrupted()) { if(logger.isInfoEnabled()) logger.info("Acceptor thread interrupted"); logger.warn("Claimed accept but nothing to select"); logger.error("No healthy selector could be found for channel " + socketChannel + " number of selectors " + totalSelectors + " closing the socket. "); IOUtils.closeQuietly(socketChannel); if (logger.isTraceEnabled()) logger.trace("Acceptor thread interrupted, closing"); logger.warn(e.getMessage(), e); logger.info("Server has stopped listening for connections on port " + port);
@Override public Map<ByteArray, List<Versioned<byte[]>>> getAll(Iterable<ByteArray> keys, Map<ByteArray, byte[]> transforms) throws VoldemortException { // TODO Does RocksDB multiget supports atomicity ? StoreUtils.assertValidKeys(keys); long startTimeNs = -1; if(logger.isTraceEnabled()) startTimeNs = System.nanoTime(); Map<ByteArray, List<Versioned<byte[]>>> results = null; try { results = StoreUtils.getAll(this, keys, transforms); } catch(PersistenceFailureException e) { logger.error(e); throw new PersistenceFailureException(e); } finally { if(logger.isTraceEnabled()) { String keyStr = ""; for(ByteArray key: keys) keyStr += key + " "; logger.trace("Completed GETALL (" + getName() + ") from keys " + keyStr + " in " + (System.nanoTime() - startTimeNs) + " ns at " + System.currentTimeMillis()); } } return results; }
@Override public synchronized void put(K key, Versioned<V> value, T transforms) throws VoldemortException { // try to delete from assertion // do real put if has not been asserted Boolean result = assertionMap.remove(key); if(result == null) { logger.info("PUT key: " + key + " (never asserted) assertionMap size: " + assertionMap.size()); super.put(key, value, transforms); if(logger.isTraceEnabled()) { logger.trace("PUT key: " + key + " (never asserted) assertionMap size: " + assertionMap.size()); } } else { logger.info("PUT key: " + key + " (found and fulfills put assertion) assertionMap size: " + assertionMap.size()); if(logger.isDebugEnabled()) { logger.debug("PUT key: " + key + " (found and fulfills put assertion) assertionMap size: " + assertionMap.size()); } } }
/**
 * Reports the machine's public IP address using AWS's check-ip endpoint,
 * when the corresponding console option is enabled.
 */
@Override
public void run() {
    // Nothing to do unless the user enabled this check on the console.
    if (!ProxyUtil.isChecked(ShowOnConsole.YES)) {
        return;
    }
    try {
        LOGGER.trace("Checking ip address...");
        String publicIp = ConnectionUtil.getSource("http://checkip.amazonaws.com");
        LOGGER.info("Your public IP address is " + publicIp);
    } catch (MalformedURLException e) {
        LOGGER.warn("Malformed URL: "+ e.getMessage(), e);
    } catch (IOException e) {
        LOGGER.warn("Error during AWS test: "+ e.getMessage(), e);
    }
}
/**
 * Stores multiple versions for the key, consuming any pending put assertion
 * for it along the way, and delegates the actual write to the parent store.
 *
 * @param key key to store under
 * @param values versions to store
 * @return the obsolete versions reported by the delegate store
 */
@Override
public synchronized List<Versioned<V>> multiVersionPut(K key, final List<Versioned<V>> values) {
    Boolean asserted = assertionMap.remove(key);
    if(asserted != null) {
        if(logger.isDebugEnabled()) {
            logger.debug("PUT key: " + key
                         + " (found and fulfills put assertion) assertionMap size: "
                         + assertionMap.size());
        }
    } else if(logger.isTraceEnabled()) {
        logger.trace("PUT key: " + key + " (never asserted) assertionMap size: "
                     + assertionMap.size());
    }
    return super.multiVersionPut(key, values);
}
// Loads the i18n properties for the currently selected language: prefers the
// latest copy hosted on GitHub, falling back to a local copy when the download fails.
private void getI18nLanguage() throws IOException {
    try {
        String pageSourceLanguage = ConnectionUtil.getSourceLineFeed(
            "https://raw.githubusercontent.com/ron190/jsql-injection/master/web/services/i18n/jsql_"+ DialogTranslate.this.language.getNameLocale() +".properties"
        );
        this.languageProperties.load(new StringReader(pageSourceLanguage));
        LOGGER.info("Text for "+ DialogTranslate.this.language +" translation loaded from Github");
    } catch (IOException e) {
        // Fallback: read the bundled properties file from disk.
        // NOTE(review): "/com/jsql/i18n/..." looks like a classpath resource path,
        // not a filesystem path — confirm this branch actually resolves at runtime;
        // getResourceAsStream() may have been intended. Also new String(byte[]) uses
        // the platform default charset — TODO confirm the file encoding matches.
        // If this read also fails, the IOException propagates to the caller.
        this.languageProperties.load(new StringReader(new String(Files.readAllBytes(Paths.get("/com/jsql/i18n/jsql_"+ DialogTranslate.this.language.getNameLocale() +".properties")))));
        LOGGER.info("Text for "+ DialogTranslate.this.language +" translation loaded from local");
        // Ignore
        IgnoreMessageException exceptionIgnored = new IgnoreMessageException(e);
        LOGGER.trace(exceptionIgnored, exceptionIgnored);
    }
}
/**
 * Tests whether the Blind injection strategy applies to the current target.
 * When injectable, marks the strategy allowed and notifies the views;
 * otherwise marks it unallowed. Skips entirely when the vendor has no Blind test.
 */
@Override
public void checkApplicability() throws StoppedByUserSlidingException {
    // Guard: some vendors define no Blind test at all.
    if (MediatorModel.model().getVendor().instance().sqlTestBlindFirst() == null) {
        LOGGER.info("No Blind strategy known for "+ MediatorModel.model().getVendor());
        return;
    }
    LOGGER.trace(I18n.valueByKey("LOG_CHECKING_STRATEGY") +" Blind...");
    this.blind = new InjectionBlind();
    this.isApplicable = this.blind.isInjectable();
    if (!this.isApplicable) {
        this.unallow();
        return;
    }
    LOGGER.debug(I18n.valueByKey("LOG_VULNERABLE") +" Blind injection");
    this.allow();
    // Tell the views which binary method succeeded.
    Request requestMessageBinary = new Request();
    requestMessageBinary.setMessage(Interaction.MESSAGE_BINARY);
    requestMessageBinary.setParameters(this.blind.getInfoMessage());
    MediatorModel.model().sendToViews(requestMessageBinary);
}
FileUtils.writeStringToFile(metadataFile, metadata.toJsonString()); } catch(IOException e) { logger.error("Cannot create metadata file ", e); throw new IOException("Unable to create metadata file " + metadataFile); metadata = new ReadOnlyStorageMetadata(metadataFile); } catch(IOException e) { logger.warn("Cannot read metadata file, assuming default values"); logger.trace("Opened chunked file set for " + baseDir + " with " + indexFileSizes.size() + " chunks and format " + storageFormat);
@Override protected void runInContext() { try { TimeZone.getDefault(); List<ProjectInvitationVO> invitationsToExpire = _projectInvitationDao.listInvitationsToExpire(_invitationTimeOut); if (!invitationsToExpire.isEmpty()) { s_logger.debug("Found " + invitationsToExpire.size() + " projects to expire"); for (ProjectInvitationVO invitationToExpire : invitationsToExpire) { invitationToExpire.setState(ProjectInvitation.State.Expired); _projectInvitationDao.update(invitationToExpire.getId(), invitationToExpire); s_logger.trace("Expired project invitation id=" + invitationToExpire.getId()); } } } catch (Exception ex) { s_logger.warn("Exception while running expired invitations cleanup", ex); } } }
/**
 * Takes one unit of HA work from the database and processes it. When no work
 * is available, sleeps for {@code _timeToSleep} before returning so the caller
 * can poll again.
 */
private void runWithContext() {
    HaWorkVO work = null;
    try {
        s_logger.trace("Checking the database for work");
        work = _haDao.take(_serverId);
        if (work == null) {
            try {
                // No work: sleep until the next poll (or until notified).
                synchronized (this) {
                    wait(_timeToSleep);
                }
                return;
            } catch (final InterruptedException e) {
                // FIX: restore the interrupt flag so the owning thread/pool can
                // observe the interruption (it was previously swallowed).
                Thread.currentThread().interrupt();
                s_logger.info("Interrupted");
                return;
            }
        }
        // Tag log output with the work item id while it is processed.
        NDC.push("work-" + work.getId());
        s_logger.info("Processing work " + work);
        processWork(work);
    } catch (final Throwable th) {
        s_logger.error("Caught this throwable, ", th);
    } finally {
        if (work != null) {
            NDC.pop();
        }
    }
}
nodeIdList = this.storeInstance.getReplicationNodeList(masterPartitionId); } catch(Exception exception) { logger.info("Aborting fixKey due to bad init."); if(logger.isDebugEnabled()) { exception.printStackTrace(); if(logger.isTraceEnabled()) { if(toReadRepair.size() == 0) { logger.trace("Nothing to repair"); logger.trace(nodeValue.getNodeId() + " --- " + nodeValue.getKey().toString());
/**
 * Looks up a role by id, hiding 'Admin'-type roles from callers that are not
 * root admins.
 *
 * @param id role id; may be null
 * @return the role, or null when the id is invalid, the role does not exist,
 *         or the role is not visible to the calling account
 */
@Override
public Role findRole(Long id) {
    if (id == null || id < 1L) {
        logger.trace(String.format("Role ID is invalid [%s]", id));
        return null;
    }
    RoleVO role = roleDao.findById(id);
    if (role == null) {
        logger.trace(String.format("Role not found [id=%s]", id));
        return null;
    }
    // Admin-type roles are only visible to root admins.
    Account caller = getCurrentAccount();
    boolean callerIsRootAdmin = accountManager.isRootAdmin(caller.getId());
    if (!callerIsRootAdmin && RoleType.Admin == role.getRoleType()) {
        logger.debug(String.format("Role [id=%s, name=%s] is of 'Admin' type and is only visible to 'Root admins'.", id, role.getName()));
        return null;
    }
    return role;
}
/**
 * Processes one bad key: attempts the consistency fix, records the attempt in
 * the stats, and on failure queues the key with its status for the output writer.
 */
@Override
public void run() {
    logger.trace("About to process key " + badKey + " (" + myName() + ")");
    Status status = doConsistencyFix();
    logger.trace("Finished processing key " + badKey + " (" + myName() + ")");
    consistencyFix.getStats().incrementFixCount();
    if(status != Status.SUCCESS) {
        try {
            badKeyQOut.put(new BadKeyStatus(badKey, status));
        } catch(InterruptedException ie) {
            // FIX: restore the interrupt flag so the executor can observe the
            // interruption (it was previously swallowed).
            Thread.currentThread().interrupt();
            logger.warn("Worker thread " + myName() + " interrupted.");
        }
        consistencyFix.getStats().incrementFailures(status);
    }
}
@Override protected void stopInner() { if(logger.isEnabledFor(Level.INFO)) logger.info("Stopping Voldemort NIO socket server (" + serviceName + ") on port " + port); if(logger.isTraceEnabled()) logger.trace("Interrupted acceptor thread, waiting " + SHUTDOWN_TIMEOUT_MS + " ms for termination"); logger.warn("Acceptor thread pool did not stop cleanly after " + SHUTDOWN_TIMEOUT_MS + " ms"); logger.warn(e.getMessage(), e); } catch(Exception e) { if(logger.isEnabledFor(Level.WARN)) logger.warn(e.getMessage(), e); if(logger.isTraceEnabled()) logger.trace("Shut down SelectorManager thread pool acceptor, waiting " + SHUTDOWN_TIMEOUT_MS + " ms for termination");
/**
 * Reads the raw bytes for {@code key} from RocksDB and decodes them into a
 * list of versioned values.
 *
 * @param key key to look up
 * @param transforms unused by this implementation
 * @return the decoded versions, or an empty list when the key is absent
 * @throws PersistenceFailureException when the RocksDB read fails
 */
private List<Versioned<byte[]>> getValueForKey(ByteArray key, byte[] transforms)
        throws PersistenceFailureException {
    // -1 sentinel: elapsed time is only sampled when trace logging is enabled.
    long startTimeNs = -1;
    if(logger.isTraceEnabled()) {
        startTimeNs = System.nanoTime();
    }
    List<Versioned<byte[]>> value = null;
    try {
        byte[] raw = getRocksDB().get(storeHandle, key.get());
        if(raw == null) {
            return Collections.emptyList();
        }
        value = StoreBinaryFormat.fromByteArray(raw);
    } catch(RocksDBException e) {
        logger.error(e);
        throw new PersistenceFailureException(e);
    } finally {
        if(logger.isTraceEnabled()) {
            logger.trace("Completed GET (" + getName() + ") from key " + key + " (keyRef: "
                         + System.identityHashCode(key) + ") in "
                         + (System.nanoTime() - startTimeNs) + " ns at "
                         + System.currentTimeMillis());
        }
    }
    return value;
}
singlePartition.add(partitionId); if(logger.isDebugEnabled()) { logger.debug("Start fetch request to Node[" + clusterNode.toString() + "] for partition[" + partitionId + "] of store[" + storeName + "]"); if(logger.isTraceEnabled()) { logger.trace("fetched " + new String(key.get())); logger.trace("map has keys: " + keyValueNodeSetMap.size()); if(logger.isTraceEnabled()) { logger.trace("sweeped; keys left: " + keyValueNodeSetMap.size()); if(report != null) { for(String line: report.split("\n")) { logger.info(line);
@Override public void create() throws ResourceAllocationException { try { UserVm vm = _userVmService.createVirtualMachine(this); if (vm != null) { setEntityId(vm.getId()); setEntityUuid(vm.getUuid()); } else { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to deploy vm"); } } catch (InsufficientCapacityException ex) { s_logger.info(ex); s_logger.trace(ex.getMessage(), ex); throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR, ex.getMessage()); } catch (ResourceUnavailableException ex) { s_logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex.getMessage()); } catch (ConcurrentOperationException ex) { s_logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); } catch (ResourceAllocationException ex) { s_logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.RESOURCE_ALLOCATION_ERROR, ex.getMessage()); } } }