public void restoreOriginalTracker() {
  if (originalTracker != null) {
    ShimLoader.getHadoopShims().setJobLauncherRpcAddress(conf, originalTracker);
    originalTracker = null;
  }
}
@Override
public void abortJob(JobContext context, int status) throws IOException {
  JobConf conf = ShimLoader.getHadoopShims().getJobConf(context);
  Path tmpLocation = new Path(conf.get(TMP_LOCATION));
  FileSystem fs = tmpLocation.getFileSystem(conf);
  LOG.debug("Removing " + tmpLocation);
  fs.delete(tmpLocation, true);
}
/**
 * Get an instance of HdfsErasureCodingShim from a config.
 */
public static HadoopShims.HdfsErasureCodingShim getErasureShim(Configuration config)
    throws IOException {
  HadoopShims hadoopShims = ShimLoader.getHadoopShims();
  FileSystem fileSystem = FileSystem.get(config);
  return hadoopShims.createHdfsErasureCodingShim(fileSystem, config);
}
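// Hypothetical caller sketch (the Configuration here is illustrative, not
// taken from the snippet above): fetch the erasure-coding shim for the
// default filesystem of a fresh config.
Configuration conf = new Configuration();
HadoopShims.HdfsErasureCodingShim ecShim = getErasureShim(conf);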
private static String getMetastoreJdbcPasswd(HiveConf conf) throws IOException {
  return ShimLoader.getHadoopShims().getPassword(conf, HiveConf.ConfVars.METASTOREPWD.varname);
}
@Override
public Object run() throws Exception {
  // Re-resolve the filesystem so the access check runs as the impersonated user.
  FileSystem fsAsUser = FileSystem.get(fs.getUri(), fs.getConf());
  ShimLoader.getHadoopShims().checkFileAccess(fsAsUser, stat, action);
  addChildren(fsAsUser, stat.getPath(), children);
  return null;
}
});
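// Sketch of the impersonation context this run() typically executes under
// (assumption: the proxy-user setup and variable names below are
// illustrative, not taken from the snippet above):
UserGroupInformation proxyUser = UserGroupInformation.createProxyUser(
    userName, UserGroupInformation.getLoginUser());
proxyUser.doAs(new PrivilegedExceptionAction<Object>() {
  @Override
  public Object run() throws Exception {
    // ... perform the access check and recursion as the impersonated user ...
    return null;
  }
});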
public static org.apache.hadoop.mapreduce.TaskAttemptID createTaskAttemptID(
    JobID jobId, boolean isMap, int taskId, int id) {
  return ShimLoader.getHadoopShims().newTaskAttemptID(jobId, isMap, taskId, id);
}

public static org.apache.hadoop.mapred.JobContext createJobContext(
    org.apache.hadoop.mapreduce.JobContext context) {
@Override
public void initialize(URI name, Configuration conf) throws IOException {
  // Create a proxy for the local filesystem; the scheme/authority serving
  // as the proxy is derived from the supplied URI.
  this.scheme = name.getScheme();
  String nameUriString = name.toString();
  String authority = name.getAuthority() != null ? name.getAuthority() : "";
  String proxyUriString = scheme + "://" + authority + "/";
  fs = ShimLoader.getHadoopShims().createProxyFileSystem(localFs, URI.create(proxyUriString));
  fs.initialize(name, conf);
}
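// Hypothetical usage of the proxy filesystem above, assuming it has been
// registered for an illustrative scheme "pfile" via Hadoop's standard
// fs.<scheme>.impl configuration key (class name below is assumed):
Configuration conf = new Configuration();
conf.set("fs.pfile.impl", MyProxyLocalFileSystem.class.getName());
FileSystem proxied = FileSystem.get(URI.create("pfile:///tmp/data"), conf);
// Paths keep the pfile:// scheme while I/O is served by the wrapped local FS.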
private void storeBucketPathMapping(TableScanOperator tsOp, FileStatus[] srcs) {
  Map<String, Integer> bucketFileNameMapping = new HashMap<String, Integer>();
  for (int pos = 0; pos < srcs.length; pos++) {
    if (ShimLoader.getHadoopShims().isDirectory(srcs[pos])) {
      throw new RuntimeException("Was expecting '" + srcs[pos].getPath() + "' to be bucket file.");
    }
    bucketFileNameMapping.put(srcs[pos].getPath().getName(), pos);
  }
  tsOp.getConf().setBucketFileNameMapping(bucketFileNameMapping);
}
private static Path getQualifiedPathWithoutSchemeAndAuthority(Path srcf, FileSystem fs) {
  Path currentWorkingDir = fs.getWorkingDirectory();
  Path path = srcf.makeQualified(srcf.toUri(), currentWorkingDir);
  return ShimLoader.getHadoopShims().getPathWithoutSchemeAndAuthority(path);
}
@Override
public void checkOutputSpecs(FileSystem ignored, JobConf jc) throws IOException {
  // Delegate to the new API.
  Job job = new Job(jc);
  JobContext jobContext = ShimLoader.getHadoopShims().newJobContext(job);
  checkOutputSpecs(jobContext);
}
public CombineHiveInputSplit() throws IOException {
  this(ShimLoader.getHadoopShims().getCombineFileInputFormat().getInputSplitShim());
}
@Override
public void cleanupOnFailure(String location, Job job) throws IOException {
  ShimLoader.getHadoopShims().getHCatShim().abortJob(getOutputFormat(), job);
}
private String getPrincipalWithoutRealmAndHost(String fullPrincipal)
    throws HttpAuthenticationException {
  KerberosNameShim fullKerberosName;
  try {
    fullKerberosName = ShimLoader.getHadoopShims().getKerberosNameShim(fullPrincipal);
    return fullKerberosName.getShortName();
  } catch (IOException e) {
    throw new HttpAuthenticationException(e);
  }
}
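// Illustrative expectation only (assumption: default auth_to_local mapping
// rules): a three-part Kerberos service principal reduces to its first
// component.
String shortName = getPrincipalWithoutRealmAndHost("HTTP/gateway.example.com@EXAMPLE.COM");
// shortName is expected to be "HTTP" under the default rules.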
@Override
public void checkOutputSpecs(final FileSystem ignored, final JobConf job) throws IOException {
  realOutputFormat.checkOutputSpecs(
      ShimLoader.getHadoopShims().getHCatShim().createJobContext(job, null));
}
@Override
public RecordWriter getRecordWriter(TaskAttemptContext context)
    throws IOException, InterruptedException {
  Path file = getDefaultWorkFile(context, "");
  return new OrcRecordWriter(file,
      OrcFile.writerOptions(ShimLoader.getHadoopShims().getConfiguration(context)));
}
/**
 * Invoked after runInternal(), even if an exception is thrown in runInternal().
 * Cleans up resources that were set up in beforeRun().
 */
protected void afterRun() {
  LogUtils.unregisterLoggingContext();
  // Reset back to the session context after the query is done.
  ShimLoader.getHadoopShims().setHadoopSessionContext(parentSession.getSessionState().getSessionId());
}
private FileStatus[] getTrashContents() throws Exception {
  FileSystem fs = FileSystem.get(hiveConf);
  Path trashDir = ShimLoader.getHadoopShims().getCurrentTrashPath(hiveConf, fs);
  return fs.globStatus(trashDir.suffix("/*"));
}
@Override
public RecordReader<NullWritable, OrcStruct> createRecordReader(
    InputSplit inputSplit, TaskAttemptContext context)
    throws IOException, InterruptedException {
  FileSplit fileSplit = (FileSplit) inputSplit;
  Path path = fileSplit.getPath();
  Configuration conf = ShimLoader.getHadoopShims().getConfiguration(context);
  return new OrcRecordReader(OrcFile.createReader(path, OrcFile.readerOptions(conf)),
      conf, fileSplit.getStart(), fileSplit.getLength());
}
@Override
public void beforeClass(HiveTestEnvContext ctx) throws Exception {
  HadoopShims shims = ShimLoader.getHadoopShims();
  mr1 = shims.getLocalMiniTezCluster(ctx.hiveConf, true);
  mr1.setupConfiguration(ctx.hiveConf);
}
/**
 * Invoked before runInternal().
 * Sets up preconditions and configurations.
 */
protected void beforeRun() {
  ShimLoader.getHadoopShims().setHadoopQueryContext(queryState.getQueryId());
  createOperationLog();
  LogUtils.registerLoggingContext(queryState.getConf());
}
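// Sketch of the call order implied by the beforeRun()/afterRun() javadocs
// above (assumption: the driving method shown here, run(), is illustrative):
public void run() throws Exception {
  beforeRun();        // set Hadoop query context, register logging context
  try {
    runInternal();    // the operation's actual work
  } finally {
    afterRun();       // always invoked, even when runInternal() throws
  }
}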