/**
 * Validates that the given index file is small enough to be addressed with an {@code int}
 * (i.e. no larger than {@link Integer#MAX_VALUE} bytes).
 *
 * @param indexFile the file whose size is checked
 * @throws IOException (as {@code IOE}) if the file exceeds {@link Integer#MAX_VALUE} bytes
 */
public static void checkFileSize(File indexFile) throws IOException
{
  final long length = indexFile.length();
  if (length > Integer.MAX_VALUE) {
    throw new IOE("File[%s] too large[%d]", indexFile, length);
  }
}
/**
 * Returns the currently known leader host, electing one lazily via {@code pickOneHost()}.
 *
 * @param cached when true, reuse the previously discovered leader if one is cached;
 *               when false, always re-pick a host
 * @throws IOException (as {@code IOE}) if no server could be picked
 */
private String getCurrentKnownLeader(final boolean cached) throws IOException
{
  // accumulateAndGet atomically updates the cached leader. NOTE(review): the accumulator
  // function may be re-applied on contention, so pickOneHost() can be invoked more than
  // once per call — presumably pickOneHost() is safe to call repeatedly; verify.
  final String leader = currentKnownLeader.accumulateAndGet(
      null,
      // keep the cached value only if it exists AND caching was requested
      (current, given) -> current == null || !cached ? pickOneHost() : current
  );
  if (leader == null) {
    throw new IOE("No known server");
  } else {
    return leader;
  }
}
/**
 * Opens an input stream for the object addressed by the given URI, translating any
 * S3 service failure into an {@code IOE}.
 *
 * @param uri the S3 URI to read
 * @throws IOException (as {@code IOE}) if the underlying service call fails
 */
@Override
public InputStream getInputStream(URI uri) throws IOException
{
  try {
    final FileObject fileObject = buildFileObject(uri, s3Client);
    return fileObject.openInputStream();
  }
  catch (ServiceException e) {
    throw new IOE(e, "Could not load URI [%s]", uri);
  }
}
// NOTE(review): fragment — the configured task log location exists but is not a directory.
throw new IOE("taskLogDir [%s] must be a directory.", taskLogDir);
/**
 * Writes the segment descriptor JSON to {@code descriptorPath} on the output filesystem,
 * replacing any descriptor left behind by a previous (failed) attempt.
 * Exceptions are logged and rethrown so an outer retry mechanism can handle them.
 */
@Override
public long push() throws IOException
{
  try {
    // report liveness to the Hadoop framework so long-running pushes are not killed
    progressable.progress();
    // remove a stale descriptor from an earlier attempt before rewriting it
    if (outputFS.exists(descriptorPath)) {
      if (!outputFS.delete(descriptorPath, false)) {
        throw new IOE("Failed to delete descriptor at [%s]", descriptorPath);
      }
    }
    // try-with-resources guarantees the stream is closed even if serialization fails
    try (final OutputStream descriptorOut = outputFS.create(
        descriptorPath,
        true,
        DEFAULT_FS_BUFFER_SIZE,
        progressable
    )) {
      HadoopDruidIndexerConfig.JSON_MAPPER.writeValue(descriptorOut, segment);
      descriptorOut.flush();
    }
  }
  catch (RuntimeException | IOException ex) {
    // log and rethrow — the caller's retry loop decides what happens next
    log.info(ex, "Exception in descriptor pusher retry loop");
    throw ex;
  }
  return -1;
}
},
/**
 * Copies the given task log file into the configured log directory, creating the
 * directory if it does not yet exist.
 *
 * @param taskid id of the task whose log is being pushed
 * @param file   the local log file to copy
 * @throws IOException (as {@code IOE}) if the log directory cannot be created
 */
@Override
public void pushTaskLog(final String taskid, File file) throws IOException
{
  final File logDir = config.getDirectory();
  // mkdirs() returns false when the directory could not be created, hence the exists() probe first
  if (!logDir.exists() && !logDir.mkdirs()) {
    throw new IOE("Unable to create task log dir[%s]", logDir);
  }
  final File outputFile = fileForTask(taskid);
  Files.copy(file, outputFile);
  log.info("Wrote task log to: %s", outputFile);
}
// Hadoop FileSystem.rename signals failure via its boolean return value rather than by
// throwing, so the result must be checked — the original ignored it and relied solely on
// the weaker exists() probe below.
if (!fs.rename(intermediateHdfsPath, hdfsPath)) {
  throw new IOE("Failed to rename from[%s] to [%s]", intermediateHdfsPath, hdfsPath);
}
// belt-and-braces: confirm the destination is actually present after the move
if (!fs.exists(hdfsPath)) { throw new IOE("File does not exist even after moving from[%s] to [%s]", intermediateHdfsPath, hdfsPath);
/**
 * Closes this writer: publishes the file's metadata, releases the writer, validates the
 * byte count, and merges the temporary data into the main smoosh file.
 * Statement order matters: the writer is marked closed and its metadata published before
 * the consistency checks run.
 */
@Override
public void close() throws IOException
{
  // refuse further writes from this point on
  open = false;
  // record where this logical file lives inside the smoosh chunk
  internalFiles.put(name, new Metadata(currOut.getFileNum(), startOffset, currOut.getCurrOffset()));
  writerCurrentlyInUse = false;
  // internal invariant: bytes we counted must match the offset delta of the underlying stream
  if (bytesWritten != currOut.getCurrOffset() - startOffset) {
    throw new ISE("WTF? Perhaps there is some concurrent modification going on?");
  }
  // caller-declared size must match what was actually written
  if (bytesWritten != size) {
    throw new IOE("Expected [%,d] bytes, only saw [%,d], potential corruption?", size, bytesWritten);
  }
  // Merge temporary files on to the main smoosh file.
  mergeWithSmoosher();
}
};
return Optional.absent(); } else { throw new IOE(e, "Failed to stream logs from: %s", taskKey);
// NOTE(review): fragment — rename failed; both source and destination paths are surfaced
// in the message to aid debugging.
throw new IOE(
    "Unable to rename [%s] to [%s]",
    tmpPath.toUri().toString(),
@Override public InputStream openInputStream() throws IOException { try { synchronized (inputStreamOpener) { if (streamAcquired) { return storageObject.getDataInputStream(); } // lazily promote to full GET storageObject = s3Client.getObject(s3Obj.getBucketName(), s3Obj.getKey()); final InputStream stream = storageObject.getDataInputStream(); streamAcquired = true; return stream; } } catch (ServiceException e) { throw new IOE(e, "Could not load S3 URI [%s]", uri); } }
/** * Returns the "version" (aka last modified timestamp) of the URI * * @param uri The URI to check the last timestamp * * @return The time in ms of the last modification of the URI in String format * * @throws IOException */ @Override public String getVersion(URI uri) throws IOException { try { final FileObject object = buildFileObject(uri, s3Client); return StringUtils.format("%d", object.getLastModified()); } catch (ServiceException e) { if (S3Utils.isServiceExceptionRecoverable(e)) { // The recoverable logic is always true for IOException, so we want to only pass IOException if it is recoverable throw new IOE(e, "Could not fetch last modified timestamp from URI [%s]", uri); } else { throw new RE(e, "Error fetching last modified timestamp from URI [%s]", uri); } } }
@Nullable private Map<String, LookupExtractorFactoryContainer> tryGetLookupListFromCoordinator(String tier) throws Exception { final FullResponseHolder response = fetchLookupsForTier(tier); if (response.getStatus().equals(HttpResponseStatus.NOT_FOUND)) { LOG.warn("No lookups found for tier [%s], response [%s]", tier, response); return null; } else if (!response.getStatus().equals(HttpResponseStatus.OK)) { throw new IOE( "Error while fetching lookup code from Coordinator with status[%s] and content[%s]", response.getStatus(), response.getContent() ); } // Older version of getSpecificTier returns a list of lookup names. // Lookup loading is performed via snapshot if older version is present. // This check is only for backward compatibility and should be removed in a future release if (response.getContent().startsWith("[")) { LOG.info( "Failed to retrieve lookup information from coordinator, " + "because coordinator appears to be running on older Druid version. " + "Attempting to load lookups using snapshot instead" ); return null; } else { return jsonMapper.readValue(response.getContent(), LOOKUPS_ALL_REFERENCE); } }
// NOTE(review): fragment — several disjoint pieces of redirect/retry handling are
// concatenated here; braces are unbalanced and the later throws belong to other branches.
String redirectUrlStr = fullResponseHolder.getResponse().headers().get("Location");
if (redirectUrlStr == null) {
  throw new IOE("No redirect location is found in response from url[%s].", request.getUrl());
  // from a catch block elsewhere: the Location header could not be parsed as a URL
  throw new IOE(
      ex,
      "Malformed redirect location is found in response from url[%s], new location[%s].",
  // terminal failure after all retries were consumed
  throw new IOE("Retries exhausted, couldn't fulfill request to [%s].", request.getUrl());
} else {
  // NOTE(review): fragment — any unexpected HTTP status from the overlord is fatal for this request
  throw new IOE(
      "Scary HTTP status returned: %s. Check your overlord logs for exceptions.",
      fullResponseHolder.getStatus()
try {
  // NOTE(review): fragment — File.mkdirs() returns false on failure (including when a
  // non-directory already occupies the path), so the result must be checked.
  if (!attemptDir.mkdirs()) {
    throw new IOE("Could not create directories: %s", attemptDir);