/**
 * Runs a single {@link Jobby} and returns whether it succeeded, deleting the
 * intermediate working path afterwards unless the tuning config asks to keep it.
 */
public static boolean runSingleJob(Jobby job, HadoopDruidIndexerConfig config)
{
  final boolean jobSucceeded = job.run();

  // Intermediate files are kept only when explicitly requested; after a failure
  // they are removed only if cleanup-on-failure is enabled.
  final boolean shouldCleanup =
      !config.getSchema().getTuningConfig().isLeaveIntermediate()
      && (jobSucceeded || config.getSchema().getTuningConfig().isCleanupOnFailure());

  if (shouldCleanup) {
    final Path intermediatePath = config.makeIntermediatePath();
    log.info("Deleting path[%s]", intermediatePath);
    try {
      final Configuration hadoopConf = injectSystemProperties(new Configuration());
      config.addJobProperties(hadoopConf);
      intermediatePath.getFileSystem(hadoopConf).delete(intermediatePath, true);
    }
    catch (IOException e) {
      // Best-effort cleanup: a failed delete is logged but must not mask the job result.
      log.error(e, "Failed to cleanup path[%s]", intermediatePath);
    }
  }

  return jobSucceeded;
}
/**
 * Runs the given jobs in order, stopping at the first failure, and returns
 * whether all of them succeeded. The intermediate working path is deleted
 * afterwards unless the tuning config asks to keep it.
 */
public static boolean runJobs(List<Jobby> jobs, HadoopDruidIndexerConfig config)
{
  // Execute sequentially; a single failing job aborts the remainder.
  boolean allSucceeded = true;
  for (Jobby job : jobs) {
    if (!job.run()) {
      allSucceeded = false;
      break;
    }
  }

  // Intermediate files are kept only when explicitly requested; after a failure
  // they are removed only if cleanup-on-failure is enabled.
  final boolean shouldCleanup =
      !config.getSchema().getTuningConfig().isLeaveIntermediate()
      && (allSucceeded || config.getSchema().getTuningConfig().isCleanupOnFailure());

  if (shouldCleanup) {
    final Path intermediatePath = config.makeIntermediatePath();
    log.info("Deleting path[%s]", intermediatePath);
    try {
      final Configuration hadoopConf = injectSystemProperties(new Configuration());
      config.addJobProperties(hadoopConf);
      intermediatePath.getFileSystem(hadoopConf).delete(intermediatePath, true);
    }
    catch (IOException e) {
      // Best-effort cleanup: a failed delete is logged but must not mask the job result.
      log.error(e, "Failed to cleanup path[%s]", intermediatePath);
    }
  }

  return allSucceeded;
}
tuningConfig.getMaxBytesInMemory(), tuningConfig.isLeaveIntermediate(), tuningConfig.isCleanupOnFailure(), tuningConfig.isOverwriteFiles(), tuningConfig.isIgnoreInvalidRows(),
/**
 * Runs a single {@link Jobby}, cleans up the intermediate working path when
 * configured to do so, and reports whether the job succeeded.
 */
public static boolean runSingleJob(Jobby job, HadoopDruidIndexerConfig config)
{
  final boolean succeeded = job.run();

  if (config.getSchema().getTuningConfig().isLeaveIntermediate()) {
    // Caller asked to keep intermediate output; nothing to delete.
    return succeeded;
  }
  if (!succeeded && !config.getSchema().getTuningConfig().isCleanupOnFailure()) {
    // Job failed and cleanup-on-failure is disabled; leave files for debugging.
    return succeeded;
  }

  final Path workingPath = config.makeIntermediatePath();
  log.info("Deleting path[%s]", workingPath);
  try {
    final Configuration conf = injectSystemProperties(new Configuration());
    config.addJobProperties(conf);
    workingPath.getFileSystem(conf).delete(workingPath, true);
  }
  catch (IOException e) {
    // Deletion is best-effort; log and still report the job outcome.
    log.error(e, "Failed to cleanup path[%s]", workingPath);
  }

  return succeeded;
}
/**
 * Runs each job in order, aborting at the first failure; cleans up the
 * intermediate working path when configured to do so, and reports whether
 * every job succeeded.
 */
public static boolean runJobs(List<Jobby> jobs, HadoopDruidIndexerConfig config)
{
  boolean succeeded = true;
  for (Jobby job : jobs) {
    if (!job.run()) {
      // First failure stops the pipeline; remaining jobs are not attempted.
      succeeded = false;
      break;
    }
  }

  if (config.getSchema().getTuningConfig().isLeaveIntermediate()) {
    // Caller asked to keep intermediate output; nothing to delete.
    return succeeded;
  }
  if (!succeeded && !config.getSchema().getTuningConfig().isCleanupOnFailure()) {
    // A job failed and cleanup-on-failure is disabled; leave files for debugging.
    return succeeded;
  }

  final Path workingPath = config.makeIntermediatePath();
  log.info("Deleting path[%s]", workingPath);
  try {
    final Configuration conf = injectSystemProperties(new Configuration());
    config.addJobProperties(conf);
    workingPath.getFileSystem(conf).delete(workingPath, true);
  }
  catch (IOException e) {
    // Deletion is best-effort; log and still report the overall outcome.
    log.error(e, "Failed to cleanup path[%s]", workingPath);
  }

  return succeeded;
}
tuningConfig.getMaxBytesInMemory(), tuningConfig.isLeaveIntermediate(), tuningConfig.isCleanupOnFailure(), tuningConfig.isOverwriteFiles(), tuningConfig.isIgnoreInvalidRows(),