static void roll() {
    try {
        // Open a TCP connection to the remote ExternallyRolledFileAppender
        // and send it the ROLL_OVER command.
        Socket socket = new Socket(host, port);
        DataOutputStream dos = new DataOutputStream(socket.getOutputStream());
        DataInputStream dis = new DataInputStream(socket.getInputStream());
        dos.writeUTF(ExternallyRolledFileAppender.ROLL_OVER);
        String rc = dis.readUTF();
        if (ExternallyRolledFileAppender.OK.equals(rc)) {
            cat.info("Roll over signal acknowledged by remote appender.");
        } else {
            cat.warn("Unexpected return code " + rc + " from remote entity.");
            System.exit(2);
        }
    } catch (IOException e) {
        cat.error("Could not send roll signal on host " + host + " port " + port + ".", e);
        System.exit(2);
    }
    System.exit(0);
}
private void logFailureAndWait(String action, String cause, int attempt, Exception e)
        throws InterruptedException {
    String retryMessage;
    if (attempt < maxAttempts) {
        retryMessage = ", will wait " + waitBetweenRetries + " ms until next retry.";
    } else {
        retryMessage = ", no further attempts will be performed.";
    }
    String fullMessage = "Failed to " + action + " because " + cause
            + ". Attempt #" + attempt + "/" + maxAttempts + retryMessage;
    if (e == null) {
        logger.warn(fullMessage);
    } else {
        logger.error(fullMessage, e);
    }
    // Only sleep when another attempt will actually follow; the original
    // version slept even after logging "no further attempts will be performed."
    if (attempt < maxAttempts) {
        Thread.sleep(waitBetweenRetries);
    }
}
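// A minimal sketch of the retry loop this helper is meant to serve. The
// method name runWithRetries() and the unit of work performAction() are
// assumptions for illustration, not part of the original source.
private void runWithRetries() throws InterruptedException {
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
        try {
            performAction(); // hypothetical unit of work
            return;          // success: stop retrying
        } catch (Exception e) {
            logFailureAndWait("perform action", e.getMessage(), attempt, e);
        }
    }
    // All attempts exhausted; the last call above already logged the final failure.
}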
private void registerMbean(final String name, final Object mbean) {
    final Class<?> mbeanClass = mbean.getClass();
    final ObjectName mbeanName;
    try {
        mbeanName = new ObjectName(mbeanClass.getName() + ":name=" + name);
        this.mbeanServer.registerMBean(mbean, mbeanName);
        logger.info("Bean " + mbeanClass.getCanonicalName() + " registered.");
        // Remember the name so the bean can be unregistered on shutdown.
        this.registeredMBeans.add(mbeanName);
    } catch (final Exception e) {
        logger.error("Error registering mbean " + mbeanClass.getCanonicalName(), e);
    }
}
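// A sketch of the matching cleanup pass that the registeredMBeans list makes
// possible. unregisterMbeans() is a hypothetical name, assuming the same
// mbeanServer, logger, and registeredMBeans fields as above.
private void unregisterMbeans() {
    for (final ObjectName mbeanName : this.registeredMBeans) {
        try {
            this.mbeanServer.unregisterMBean(mbeanName);
            logger.info("Bean " + mbeanName + " unregistered.");
        } catch (final Exception e) {
            logger.error("Error unregistering mbean " + mbeanName, e);
        }
    }
    this.registeredMBeans.clear();
}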
public void run() {
    try {
        DataSetStats totals = new DataSetStats();
        List<String> names = new ArrayList<String>();
        List<DataSetStats> stats = new ArrayList<DataSetStats>();
        for (StorageEngine<ByteArray, byte[], byte[]> store : storeRepository.getAllStorageEngines()) {
            // Skip engines whose contents are not meaningful for data statistics.
            if (store instanceof ReadOnlyStorageEngine
               || store instanceof ViewStorageEngine
               || store instanceof MetadataStore)
                continue;
            logger.info(store.getClass());
            logger.info("Calculating stats for '" + store.getName() + "'...");
            DataSetStats curr = calculateStats(store);
            names.add(store.getName());
            stats.add(curr);
            totals.add(curr);
        }
        for (int i = 0; i < names.size(); i++)
            logger.info("\n\nData statistics for store '" + names.get(i) + "':\n"
                        + stats.get(i) + "\n\n");
        logger.info("Totals: \n " + totals + "\n\n");
    } catch (Exception e) {
        logger.error("Error in thread: ", e);
    }
}
// The original fragment lost the structure around the classloader reload; the
// reloadResources block and the try/catch over validatorLoader.close() below
// are reconstructed from context.
for (final File f : validatorDir.listFiles()) {
    if (f.getName().endsWith(".jar")) {
        resources.add(f.toURI().toURL());
        // Reload when a jar is new or its timestamp changed since the last scan.
        if (resourceTimestamps.get(f.getName()) == null
            || resourceTimestamps.get(f.getName()) != f.lastModified()) {
            reloadResources = true;
            logger.info("Resource " + f.getName() + " is updated. Reload the classloader.");
            resourceTimestamps.put(f.getName(), f.lastModified());
        }
    }
}
if (reloadResources) {
    try {
        validatorLoader.close();
    } catch (final IOException e) {
        logger.error("Cannot reload validator classloader because failure "
            + "to close the validator classloader.", e);
    }
    validatorLoader = new ValidatorClassLoader(resources.toArray(new URL[resources.size()]));
}
@Override
protected void stopInner() throws VoldemortException {
    List<VoldemortException> exceptions = new ArrayList<VoldemortException>();
    /* Stop in reverse order */
    for (VoldemortService service : Utils.reversed(services)) {
        try {
            service.stop();
        } catch (VoldemortException e) {
            exceptions.add(e);
            logger.error(e);
        }
    }
    if (exceptions.size() > 0) {
        throw exceptions.get(0);
    }
}
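// A sketch of an alternative ending for stopInner(): instead of throwing only
// the first failure and leaving the rest in the log, attach the remaining
// failures as suppressed exceptions so one stack trace carries all of them.
// This is an assumption about desirable behavior, not the original code.
if (!exceptions.isEmpty()) {
    VoldemortException first = exceptions.get(0);
    for (VoldemortException other : exceptions.subList(1, exceptions.size())) {
        first.addSuppressed(other);
    }
    throw first;
}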
public static Condition fromJson(final Object obj) throws Exception {
    if (checkerLoader == null) {
        throw new Exception("Condition Checker loader not initialized!");
    }
    final Map<String, Object> jsonObj = (HashMap<String, Object>) obj;
    Condition cond = null;
    try {
        // Rebuild every checker from its serialized form, keyed by checker id.
        final Map<String, ConditionChecker> checkers = new HashMap<>();
        final List<Object> checkersJson = (List<Object>) jsonObj.get("checkers");
        for (final Object oneCheckerJson : checkersJson) {
            final Map<String, Object> oneChecker = (HashMap<String, Object>) oneCheckerJson;
            final String type = (String) oneChecker.get("type");
            final ConditionChecker ck =
                checkerLoader.createCheckerFromJson(type, oneChecker.get("checkerJson"));
            checkers.put(ck.getId(), ck);
        }
        final String expr = (String) jsonObj.get("expression");
        final Long nextCheckTime = Long.valueOf((String) jsonObj.get("nextCheckTime"));
        cond = new Condition(checkers, expr, nextCheckTime);
    } catch (final Exception e) {
        // logger.error already records the stack trace; the original also
        // called e.printStackTrace(), which duplicated it on stderr.
        logger.error("Failed to recreate condition from json.", e);
        throw new Exception("Failed to recreate condition from json.", e);
    }
    return cond;
}
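// A hypothetical sketch of the map shape fromJson() expects, useful when
// hand-crafting test fixtures. The field names ("checkers", "type",
// "checkerJson", "expression", "nextCheckTime") come from the parser above;
// the concrete values are made up.
final Map<String, Object> oneChecker = new HashMap<>();
oneChecker.put("type", "BasicTimeChecker");           // assumed checker type
oneChecker.put("checkerJson", new HashMap<>());       // checker-specific payload
final Map<String, Object> conditionJson = new HashMap<>();
conditionJson.put("checkers", Collections.singletonList(oneChecker));
conditionJson.put("expression", "checker1.eval()");   // made-up expression
conditionJson.put("nextCheckTime", "1600000000000");  // note: stored as a String
final Condition cond = Condition.fromJson(conditionJson);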
logger.error("Cannot run slop pusher job since Voldemort server is rebalancing"); return; logger.info("Started streaming slop pusher job at " + startTime); if(zoneMapping.get(zone.getId()) == null || zoneMapping.get(zone.getId()).size() == 0) zonesDown++; logger.info("Completed streaming slop pusher job at " + startTime + " early because " + zonesDown + " zones are down"); stopAdminClient(); attemptedByNode.put(node.getId(), 0L); succeededByNode.put(node.getId(), 0L); logger.info("Attempted pushing " + attemptedPushes + " slops"); consumerResults.add(consumerExecutor.submit(new SlopConsumer(nodeId, slopQueue, slopStorageEngine))); logger.warn("Interrupted exception", e); terminatedEarly = true; } catch(Exception e) { logger.error(e, e); terminatedEarly = true; } finally {
Map<String, Long> storeToMaxVersion = Maps.newHashMapWithExpectedSize(storeNames.size());
for (String storeName : storeNames) {
    storeToMaxVersion.put(storeName, 0L);
}
// The per-node loop and try block below are reconstructed from context: each
// node is queried for its max read-only store versions, tolerating up to
// maxNodeFailures unreachable nodes before rethrowing.
for (Node node : nodes) {
    try {
        Map<String, Long> currentNodeVersions = getROMaxVersion(node.getId(), storeNames);
        for (String storeName : currentNodeVersions.keySet()) {
            Long maxVersion = storeToMaxVersion.get(storeName);
            if (maxVersion != null && maxVersion < currentNodeVersions.get(storeName)) {
                storeToMaxVersion.put(storeName, currentNodeVersions.get(storeName));
            }
        }
    } catch (VoldemortException e) {
        nodeFailures++;
        if (nodeFailures > maxNodeFailures) {
            logger.error("Got an exception while trying to reach node " + node.getId()
                         + ". " + nodeFailures
                         + " node failure(s) so far; maxNodeFailures exceeded, rethrowing.");
            throw e;
        } else {
            logger.warn("Got an exception while trying to reach node " + node.getId()
                        + ". " + nodeFailures + " node failure(s) so far; continuing.", e);
        }
    }
}
// Fragment of an SLA-settings handler; "// ..." marks code lost from the
// original extract, and the ServletException catch header is reconstructed
// from context.
final SlaOption sla;
try {
    sla = parseSlaSetting(settings.get(set));
} catch (final Exception e) {
    throw new ServletException(e);
}
sla.getInfo().put(SlaOption.INFO_FLOW_NAME, sched.getFlowName());
sla.getInfo().put(SlaOption.INFO_EMAIL_LIST, slaEmails);
slaOptions.add(sla);
// ...
} catch (final ServletException e) {
    ret.put("error", e.getMessage());
} catch (final ScheduleManagerException e) {
    logger.error(e.getMessage(), e);
    ret.put("error", e.getMessage());
}
private List<String> fetchResourceIdOrderedList(final int projectId) {
    try {
        return this.databaseOperator.query(SQL_FETCH_PVR, rs -> {
            final List<String> results = new ArrayList<>();
            while (rs.next()) {
                results.add(rs.getString(1));
            }
            return results;
        }, projectId);
    } catch (final SQLException e) {
        log.error("Error performing cleanup of Project: " + projectId, e);
    }
    return Collections.emptyList();
}
// Fragments of REST GET / GET-metadata / GETALL handlers; "// ..." marks code
// lost from the original extract.
if (logger.isDebugEnabled()) {
    logger.debug("GET Metadata request received.");
}
// ... (argument tail of a lost call:)
//         true, this.requestObject.getRequestOriginTimeInMs());
if (logger.isDebugEnabled()) {
    logger.debug("GET Metadata successful !");
}
// ...
if (logger.isDebugEnabled()) {
    logger.debug("GET request received.");
}
boolean keyExists = false;
List<Versioned<byte[]>> versionedValues =
        this.storeClient.getWithCustomTimeout(this.requestObject);
if (versionedValues == null || versionedValues.size() == 0) {
    if (this.requestObject.getValue() != null) {
        if (versionedValues == null) {
            versionedValues = new ArrayList<Versioned<byte[]>>();
        }
        versionedValues.add(this.requestObject.getValue());
        keyExists = true;
        // ...
    }
}
// ...
if (versionedResponses == null || versionedResponses.values().size() == 0) {
    logger.error("Error when doing getall. Keys do not exist.");
    // ...
}
// ...
logger.error("Requested Key with the specified version does not exist");
RestErrorHandler.writeErrorResponse(this.messageEvent, NOT_FOUND, /* ... */);
// Fragment of a read-only store swap with rollback-on-failure; "// ..." marks
// code lost from the original extract.
String dir = response.getResponse();
try {
    logger.info("Attempting swap for " + node.briefToString() + ", dir = " + dir);
    previousDirs.put(node, adminClient.readonlyOps.swapStore(nodeId, storeName, dir));
    logger.info("Swap succeeded for " + node.briefToString());
} catch (Exception e) {
    exceptions.put(node, e);
}
// ... (on any failure, nodes that already swapped are rolled back:)
try {
    int successfulNodeId = node.getId();
    logger.info("Rolling back data on successful " + node.briefToString());
    adminClient.readonlyOps.rollbackStore(successfulNodeId,
                                          storeName,
                                          ReadOnlyUtils.getVersionId(new File(previousDirs.get(node))));
    logger.info("Rollback succeeded for " + node.briefToString());
} catch (Exception e) {
    logger.error("Exception thrown during rollback ( after swap ) operation on "
                 + node.briefToString() + ": ", e);
}
// ...
logger.error("Error on " + node.briefToString() + " during swap : ", exceptions.get(node));
// Fragment of the hinted-handoff path; "// ..." marks code lost from the
// original extract, and the first catch header is reconstructed from context.
if (logger.isDebugEnabled())
    logger.debug("Trying to send hint to " + nodeId + " for key " + slop.getKey());
Store<ByteArray, Slop, byte[]> slopStore = slopStores.get(nodeId);
Utils.notNull(slopStore);
long startNs = System.nanoTime();
if (logger.isDebugEnabled())
    logger.debug("Slop attempt to write " + slop.getKey() + " (keyRef: "
                 + System.identityHashCode(slop.getKey()) + ") for " + failedNode
                 + " to node " + node);
// ...
} catch (UnreachableStoreException e) {
    failureDetector.recordException(node, (System.nanoTime() - startNs) / Time.NS_PER_MS, e);
    logger.warn("Error during hinted handoff. Will try another node", e);
} catch (IllegalStateException e) {
    logger.warn("Error during hinted handoff. Will try another node", e);
} catch (ObsoleteVersionException e) {
    logger.debug(e, e);
} catch (Exception e) {
    // Pass the exception as the throwable argument instead of concatenating
    // it into the message, so the stack trace is preserved.
    logger.error("Unknown exception. Will try another node", e);
}
// ...
if (logger.isDebugEnabled())
    logger.debug("Slop write of key " + slop.getKey() + " (keyRef: "
                 + System.identityHashCode(slop.getKey()) + ") for " + failedNode /* ... */);
// ...
logger.error("Slop write of key " + slop.getKey() + " (keyRef: "
             + System.identityHashCode(slop.getKey()) + ") for " + failedNode
             + " was not written.");
// Fragment of streaming-client commit logic; "// ..." marks code lost from
// the original extract, and the guard on streamingresults plus the first
// catch header are reconstructed from context.
private void commitToVoldemort(List<String> storeNamesToCommit) {
    if (logger.isDebugEnabled()) {
        logger.debug("Trying to commit to Voldemort");
    }
    if (nodesToStream == null || nodesToStream.size() == 0) {
        if (logger.isDebugEnabled()) {
            logger.debug("No nodes to stream to. Returning.");
        }
        return;
    }
    // ... (inside a lost catch block:)
    logger.error("Exception during commit", e);
    hasError = true;
    if (!faultyNodes.contains(node.getId()))
        faultyNodes.add(node.getId());
    // ...
    if (streamingresults == null) {
        logger.warn("StreamingSession may not have been initialized since Variable streamingresults is null. Skipping callback ");
        return;
    }
    logger.info("Invoking the Recovery Callback");
    Future future = streamingresults.submit(recoveryCallback);
    try {
        future.get();
    } catch (InterruptedException e1) {
        logger.error("Recovery Callback failed", e1);
        throw new VoldemortException("Recovery Callback failed");
    } catch (ExecutionException e1) {
        MARKED_BAD = true;
        logger.error("Recovery Callback failed during execution", e1);
        throw new VoldemortException("Recovery Callback failed during execution");
    }
    // ...
}
private synchronized void flushData() {
    BufferedWriter writer = null;
    try {
        writer = new BufferedWriter(new FileWriter(new File(this.inputPath)));
        for (String key : this.metadataMap.keySet()) {
            writer.write(NEW_PROPERTY_SEPARATOR + key + "]" + NEW_LINE);
            writer.write(this.metadataMap.get(key).toString());
            writer.write("" + NEW_LINE + NEW_LINE);
        }
        writer.flush();
    } catch (IOException e) {
        // Log the exception itself, not just its message, so the stack trace
        // is preserved.
        logger.error("IO exception while flushing data to file backed storage", e);
    }
    try {
        if (writer != null)
            writer.close();
    } catch (Exception e) {
        logger.error("Error while closing file backed storage writer", e);
    }
}
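// A minimal sketch of the same flush using try-with-resources, which closes
// the writer on every path and drops the separate close block. The name
// flushDataTwr() is hypothetical; it assumes the same inputPath, metadataMap,
// NEW_PROPERTY_SEPARATOR, and NEW_LINE members as above.
private synchronized void flushDataTwr() {
    try (BufferedWriter writer =
             new BufferedWriter(new FileWriter(new File(this.inputPath)))) {
        for (String key : this.metadataMap.keySet()) {
            writer.write(NEW_PROPERTY_SEPARATOR + key + "]" + NEW_LINE);
            writer.write(this.metadataMap.get(key).toString());
            writer.write("" + NEW_LINE + NEW_LINE);
        }
        // No explicit flush needed: close() flushes, and it runs automatically.
    } catch (IOException e) {
        logger.error("IO exception while flushing data to file backed storage", e);
    }
}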
logger.debug("Built lookup request as: " + lookupRequest.toString()); logger.error( String.format("Datastore Exception when reading (%s): %s %s", exception.getMessage(), logger.debug("Read entity: " + entity.toString()); result.put(name, new StringByteIterator(properties.get(name) .getStringValue()));
// Fragment of a helper that expands an album/page URL into direct file URLs.
// The original mixes pieces of several per-host try/catch blocks; "// ..."
// marks code lost from the extract.
public static List<URL> getFilesFromURL(URL url) {
    List<URL> result = new ArrayList<>();
    logger.debug("Checking " + url);
    // ...
    logger.error("[!] Exception while loading album " + url, e);
    // ...
    result.add(imgurImage.url);
    // ...
    logger.error("[!] Exception while loading album " + url, e);
    // ...
    } catch (IOException e) {
        logger.warn("Exception while retrieving gfycat page:", e);
    // ...
        logger.warn("Exception while retrieving vidble page:", e);
    // ...
        logger.warn("Exception while retrieving eroshare page:", e);
    // ...
    return result;
    // ... (caller-side error handling from the same extract:)
    } catch (MalformedURLException e) {
        logger.error("[!] Not a valid URL: '" + url + "'", e);
    // ...
    logger.error("[!] Error", ex);
    // ...
    logger.error("[!] Unable to rip URL: " + url);
    // ...
    return result;
}
@Override
public void doAction() throws Exception {
    logger.info("Alerting on sla failure.");
    final Map<String, Object> alert = this.slaOption.getInfo();
    if (alert.containsKey(SlaOption.ALERT_TYPE)) {
        final String alertType = (String) alert.get(SlaOption.ALERT_TYPE);
        final Alerter alerter = this.alerters.get(alertType);
        if (alerter != null) {
            try {
                final ExecutableFlow flow = this.executorLoader.fetchExecutableFlow(this.execId);
                alerter.alertOnSla(this.slaOption, SlaOption.createSlaMessage(this.slaOption, flow));
            } catch (final Exception e) {
                // Log the exception itself rather than calling
                // e.printStackTrace(), so the stack trace lands in the log.
                logger.error("Failed to alert by " + alertType, e);
            }
        } else {
            logger.error("Alerter type " + alertType + " doesn't exist. Failed to alert.");
        }
    }
}
@Override
public Status insert(String table, String key, Map<String, ByteIterator> values) {
    if (LOGGER.isDebugEnabled()) {
        LOGGER.debug("insertkey: " + primaryKeyName + "-" + key + " from table: " + table);
    }
    Map<String, AttributeValue> attributes = createAttributes(values);
    // adding primary key
    attributes.put(primaryKeyName, new AttributeValue(key));
    if (primaryKeyType == PrimaryKeyType.HASH_AND_RANGE) {
        // If the primary key type is HASH_AND_RANGE, then what has been put
        // into the attributes map above is the range key part of the primary
        // key, we still need to put in the hash key part here.
        attributes.put(hashKeyName, new AttributeValue(hashKeyValue));
    }
    PutItemRequest putItemRequest = new PutItemRequest(table, attributes);
    try {
        dynamoDB.putItem(putItemRequest);
    } catch (AmazonServiceException ex) {
        LOGGER.error(ex);
        return Status.ERROR;
    } catch (AmazonClientException ex) {
        LOGGER.error(ex);
        return CLIENT_ERROR;
    }
    return Status.OK;
}
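// A sketch of the symmetric read path under the same key scheme, mirroring
// the HASH_AND_RANGE handling of insert() above. This method is an assumption
// for illustration, not part of the original source; field projection and
// copying the returned item into result are elided.
public Status read(String table, String key, Set<String> fields,
                   Map<String, ByteIterator> result) {
    Map<String, AttributeValue> primaryKey = new HashMap<>();
    primaryKey.put(primaryKeyName, new AttributeValue(key));
    if (primaryKeyType == PrimaryKeyType.HASH_AND_RANGE) {
        // Same split as insert(): fixed hash key part plus the range key above.
        primaryKey.put(hashKeyName, new AttributeValue(hashKeyValue));
    }
    GetItemRequest getItemRequest = new GetItemRequest(table, primaryKey);
    try {
        Map<String, AttributeValue> item = dynamoDB.getItem(getItemRequest).getItem();
        if (item == null) {
            return Status.NOT_FOUND;
        }
        // ... convert item attributes into result here ...
    } catch (AmazonServiceException ex) {
        LOGGER.error(ex);
        return Status.ERROR;
    } catch (AmazonClientException ex) {
        LOGGER.error(ex);
        return CLIENT_ERROR;
    }
    return Status.OK;
}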