throw new RE(ex, "Failure getting results for query[%s] from locations[%s] because of [%s]", query,
/**
 * Rethrows any persist failure recorded by the background persist work, wrapped in an
 * {@link RE} with a short context message. No-op when no persist error has been recorded.
 */
private void throwPersistErrorIfExists()
{
  if (persistError == null) {
    return;
  }
  throw new RE(persistError, "Error while persisting");
}
static PidCgroupEntry parse(String entry) { // For example, entries with a port number will have an extra `:` in it somewhere, or ipv6 addresses. final String[] parts = entry.split(Pattern.quote(":"), 3); if (parts.length != 3) { throw new RE("Bad entry [%s]", entry); } final int heirarchyId = Integer.parseInt(parts[0]); final Set<String> controllers = new HashSet<>(Arrays.asList(parts[1].split(Pattern.quote(",")))); final Path path = Paths.get(parts[2]); return new PidCgroupEntry(heirarchyId, controllers, path); }
/**
 * Finds the mount table entry for the given cgroup controller.
 *
 * @param procMounts the proc mounts file to scan (typically {@code /proc/mounts})
 * @param cgroup     the cgroup controller name to look for among the mount options
 * @return the matching mount entry
 * @throws RE if the file cannot be read or no cgroup mount with the given controller exists
 */
private ProcMountsEntry getMountEntry(final File procMounts, final String cgroup)
{
  final List<String> lines;
  try {
    lines = Files.readLines(procMounts, Charsets.UTF_8);
  }
  catch (IOException e) {
    // Wrap with the failing file for context, consistent with the RE usage elsewhere in
    // this class, instead of a bare RuntimeException that loses the "which file" detail.
    throw new RE(e, "Unable to read [%s]", procMounts);
  }
  for (final String line : lines) {
    final ProcMountsEntry entry = ProcMountsEntry.parse(line);
    // Match only cgroup-type mounts whose options list names the requested controller.
    if (CGROUP_TYPE.equals(entry.type) && entry.options.contains(cgroup)) {
      return entry;
    }
  }
  throw new RE("Cgroup [%s] not found", cgroup);
}
private static CpuAcctMetric parse(final List<String> lines) { // File has a header. We skip it // See src/test/resources/cpuacct.usage_all for an example final int ncpus = lines.size() - 1; final long[] usrTime = new long[ncpus]; final long[] sysTime = new long[ncpus]; for (int i = 1; i < lines.size(); i++) { final String[] splits = lines.get(i).split(CgroupUtil.SPACE_MATCH, 3); if (splits.length != 3) { throw new RE("Error parsing [%s]", lines.get(i)); } final int cpuNum = Integer.parseInt(splits[0]); usrTime[cpuNum] = Long.parseLong(splits[1]); sysTime[cpuNum] = Long.parseLong(splits[2]); } return new CpuAcctMetric(usrTime, sysTime); }
/**
 * Finds this pid's cgroup entry for the given controller.
 *
 * @param procCgroup the proc cgroup file to scan (typically {@code /proc/<pid>/cgroup})
 * @param cgroup     the controller name to look for
 * @return the matching entry
 * @throws RE if the file cannot be read or no hierarchy lists the given controller
 */
private PidCgroupEntry getCgroupEntry(final File procCgroup, final String cgroup)
{
  final List<String> lines;
  try {
    lines = Files.readLines(procCgroup, Charsets.UTF_8);
  }
  catch (IOException e) {
    // Wrap with the failing file for context, consistent with the RE usage elsewhere in
    // this class, instead of a bare RuntimeException that loses the "which file" detail.
    throw new RE(e, "Unable to read [%s]", procCgroup);
  }
  for (final String line : lines) {
    // Skip comment lines.
    if (line.startsWith("#")) {
      continue;
    }
    final PidCgroupEntry entry = PidCgroupEntry.parse(line);
    if (entry.controllers.contains(cgroup)) {
      return entry;
    }
  }
  throw new RE("Hierarchy for [%s] not found", cgroup);
}
/**
 * Discovers the on-disk directory for the given cgroup controller by combining this pid's
 * cgroup path with the controller's mount point.
 *
 * @param cgroup the controller name; must not be null
 * @return the path of the existing cgroup directory
 * @throws RE if the combined path does not exist or is not a directory
 */
@Override
public Path discover(final String cgroup)
{
  Preconditions.checkNotNull(cgroup, "cgroup required");
  final PidCgroupEntry pidEntry = getCgroupEntry(new File(procDir, "cgroup"), cgroup);
  final ProcMountsEntry mountEntry = getMountEntry(new File(procDir, "mounts"), cgroup);
  // The controller's directory is its mount point joined with this pid's cgroup path.
  final File cgroupDir = new File(mountEntry.path.toString(), pidEntry.path.toString());
  if (cgroupDir.exists() && cgroupDir.isDirectory()) {
    return cgroupDir.toPath();
  }
  throw new RE("Invalid cgroup directory [%s]", cgroupDir);
}
throw new RE( e, "Failure getting results for query[%s] url[%s] because of [%s]",
/**
 * Checks whether the query has exceeded its deadline.
 *
 * @return the remaining time in milliseconds before the deadline
 * @throws RE if the deadline has already passed; the response is marked failed first
 */
private long checkQueryTimeout()
{
  final long remaining = timeoutAt - System.currentTimeMillis();
  if (remaining > 0) {
    return remaining;
  }
  // Deadline passed: record the failure on the response before throwing.
  String msg = StringUtils.format("Query[%s] url[%s] timed out.", query.getId(), url);
  setupResponseReadFailure(msg, null);
  throw new RE(msg);
}
if (map == null) { return Response.status(Response.Status.NOT_FOUND) .entity(ServletResourceUtils.sanitizeException(new RE("No lookups found"))) .build(); if (tierLookups == null) { return Response.status(Response.Status.NOT_FOUND) .entity(ServletResourceUtils.sanitizeException(new RE("Tier [%s] not found", tier))) .build();
if (map == null) { return Response.status(Response.Status.NOT_FOUND) .entity(ServletResourceUtils.sanitizeException(new RE("lookup [%s] not found", lookup))) .build();
// Adds `bytes` to the running scatter-gather total and fails the response when the
// configured maxScatterGatherBytes limit is exceeded. Note the short-circuit: the
// atomic addAndGet only runs when a limit is actually configured
// (maxScatterGatherBytes < Long.MAX_VALUE), so unlimited queries skip the counter update.
// NOTE(review): the trailing `} };` closes the enclosing anonymous class — left untouched.
private void checkTotalBytesLimit(long bytes) { if (maxScatterGatherBytes < Long.MAX_VALUE && totalBytesGathered.addAndGet(bytes) > maxScatterGatherBytes) { String msg = StringUtils.format( "Query[%s] url[%s] max scatter-gather bytes limit reached.", query.getId(), url ); setupResponseReadFailure(msg, null); throw new RE(msg); } } };
/**
 * Issues a GET against the coordinator's proxied overlord scaling endpoint and returns
 * the HTTP status of the response.
 *
 * @return the status of the scaling endpoint response
 * @throws RE wrapping any failure while making the request
 */
public HttpResponseStatus getProxiedOverlordScalingResponseStatus()
{
  try {
    final StatusResponseHolder holder = makeRequest(
        HttpMethod.GET,
        StringUtils.format("%s/druid/indexer/v1/scaling", coordinator)
    );
    return holder.getStatus();
  }
  catch (Exception e) {
    throw new RE(e, "Unable to get scaling status from [%s]", coordinator);
  }
}
throw new RE( "Error cancelling query[%s]: queriable node returned status[%d] [%s].", res.getStatus().getCode(),
throw new RE(fail.get()); throw new RE(fail.get()); return is; } else { throw new RE("Query[%s] url[%s] timed out.", query.getId(), url); String msg = StringUtils.format("Query[%s] url[%s] timed out.", query.getId(), url); setupResponseReadFailure(msg, null); throw new RE(msg); } else { return timeLeft; ); setupResponseReadFailure(msg, null); throw new RE(msg); throw new RE("Query[%s] url[%s] timed out.", query.getId(), url); throw new RE( "Error cancelling query[%s]: queriable node returned status[%d] [%s].", res.getStatus().getCode(),
/**
 * Starts the lookup described by the given bean.
 *
 * @param lookupBean the lookup to start
 * @return a (name, container) entry on success, or null if the factory reported that it
 *         failed to start
 * @throws RE wrapping any RuntimeException thrown while starting
 */
@Nullable
private Map.Entry<String, LookupExtractorFactoryContainer> startLookup(LookupBean lookupBean)
{
  final String name = lookupBean.getName();
  final LookupExtractorFactoryContainer container = lookupBean.getContainer();
  LOG.info("Starting lookup [%s]:[%s]", name, container);
  try {
    if (!container.getLookupExtractorFactory().start()) {
      // The factory declined to start without throwing; report and signal via null.
      LOG.error("Failed to start lookup [%s]:[%s]", name, container);
      return null;
    }
    LOG.info("Started lookup [%s]:[%s]", name, container);
    return new AbstractMap.SimpleImmutableEntry<>(name, container);
  }
  catch (RuntimeException e) {
    throw new RE(e, "Failed to start lookup [%s]:[%s]", name, container);
  }
}
/** * Returns the "version" (aka last modified timestamp) of the URI * * @param uri The URI to check the last timestamp * * @return The time in ms of the last modification of the URI in String format * * @throws IOException */ @Override public String getVersion(URI uri) throws IOException { try { final FileObject object = buildFileObject(uri, s3Client); return StringUtils.format("%d", object.getLastModified()); } catch (ServiceException e) { if (S3Utils.isServiceExceptionRecoverable(e)) { // The recoverable logic is always true for IOException, so we want to only pass IOException if it is recoverable throw new IOE(e, "Could not fetch last modified timestamp from URI [%s]", uri); } else { throw new RE(e, "Error fetching last modified timestamp from URI [%s]", uri); } } }
final QueryableIndex index = pair.lhs; if (index.getMetadata() == null) { throw new RE("Index metadata doesn't exist for segment[%s]", pair.rhs.getIdentifier());
throw new RE(e, "Failure on row[%s]", value);
throw new RE(e, "HTTP request to[%s] failed", request.getUrl());