private void calculateEntrySize(CacheEntry entry, FetchWork fetchWork) throws IOException {
  // Size the cache entry from the total length of the query results directory.
  Path queryResultsPath = fetchWork.getTblDir();
  FileSystem resultsFs = queryResultsPath.getFileSystem(conf);
  ContentSummary cs = resultsFs.getContentSummary(queryResultsPath);
  entry.size = cs.getLength();
}
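A minimal, self-contained sketch of the same size computation using only the public Hadoop FileSystem API; the class name, path, and Configuration here are illustrative, not taken from the snippet above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.io.IOException;

public class DirSizeSketch {
  // Returns the total length in bytes of all files under the given directory,
  // mirroring how calculateEntrySize sizes a cache entry from the results dir.
  static long totalSize(Configuration conf, Path dir) throws IOException {
    FileSystem fs = dir.getFileSystem(conf);
    ContentSummary cs = fs.getContentSummary(dir);
    return cs.getLength();
  }

  public static void main(String[] args) throws IOException {
    // Illustrative path only; any HDFS or local path would work.
    Path dir = new Path(args.length > 0 ? args[0] : "/tmp/query-results");
    System.out.println(totalSize(new Configuration(), dir) + " bytes");
  }
}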
private void getNextPath() throws Exception {
  if (work.getTblDir() != null) {
    if (!tblDataDone) {
      currPath = work.getTblDirPath();
public boolean isFetchFrom(FileSinkDesc fs) {
  return fs.getFinalDirName().equals(work.getTblDir());
}
  iterPartDesc = work.getPartDesc().iterator();
} else {
  // Unpartitioned table: a single path paired with a synthetic PartitionDesc.
  iterPath = Arrays.asList(work.getTblDir()).iterator();
  iterPartDesc = Iterators.cycle(new PartitionDesc(work.getTblDesc(), null));
LOG.info("Printing orc file dump for files from table directory.."); directories = Lists.newArrayList(); directories.add(fetchWork.getTblDir());
queryResultsPath = fetchWork.getTblDir();
FileSystem resultsFs = queryResultsPath.getFileSystem(conf);
long resultSize;
/**
 * Used for bucket map join. There is a hack for getting the PartitionDesc:
 * bucket map join currently allows only one partition to be present.
 */
public void setupContext(Iterator<Path> iterPath, Iterator<PartitionDesc> iterPartDesc) {
  this.iterPath = iterPath;
  this.iterPartDesc = iterPartDesc;
  if (iterPartDesc == null) {
    if (work.getTblDir() != null) {
      this.currTbl = work.getTblDesc();
    } else {
      // hack, get the first.
      List<PartitionDesc> listParts = work.getPartDesc();
      currPart = listParts.get(0);
    }
  }
}
@Override
protected void localizeMRTmpFilesImpl(Context ctx) {
  String s = work.getTblDir();
  if ((s != null) && ctx.isMRTmpFileURI(s)) {
    work.setTblDir(ctx.localizeMRTmpFileURI(s));
  }
  ArrayList<String> ls = work.getPartDir();
  if (ls != null) {
    ctx.localizePaths(ls);
  }
}
public ObjectInspector getOutputObjectInspector() throws HiveException {
  try {
    if (work.getTblDir() != null) {
      TableDesc tbl = work.getTblDesc();
      Deserializer serde = tbl.getDeserializerClass().newInstance();
      serde.initialize(job, tbl.getProperties());
      return serde.getObjectInspector();
    } else if (work.getPartDesc() != null) {
      List<PartitionDesc> listParts = work.getPartDesc();
      if (listParts.size() == 0) {
        return null;
      }
      currPart = listParts.get(0);
      serde = currPart.getTableDesc().getDeserializerClass().newInstance();
      serde.initialize(job, currPart.getTableDesc().getProperties());
      setPrtnDesc();
      currPart = null;
      return rowObjectInspector;
    } else {
      return null;
    }
  } catch (Exception e) {
    throw new HiveException("Failed with exception " + e.getMessage()
        + org.apache.hadoop.util.StringUtils.stringifyException(e));
  }
}
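The snippets above share one convention: a non-null getTblDir() marks an unpartitioned fetch, otherwise the partition list drives iteration. Below is a hedged sketch of that dispatch as a standalone helper; FetchWorkDispatch and effectiveTableDesc are hypothetical names for illustration, not Hive APIs.

import java.util.List;

import org.apache.hadoop.hive.ql.plan.FetchWork;
import org.apache.hadoop.hive.ql.plan.PartitionDesc;
import org.apache.hadoop.hive.ql.plan.TableDesc;

public final class FetchWorkDispatch {
  private FetchWorkDispatch() {}

  // Hypothetical helper: picks the descriptor a fetch should deserialize with,
  // using the same discriminator as getOutputObjectInspector above.
  public static TableDesc effectiveTableDesc(FetchWork work) {
    if (work.getTblDir() != null) {
      // Unpartitioned table: the table-level descriptor applies directly.
      return work.getTblDesc();
    }
    List<PartitionDesc> parts = work.getPartDesc();
    if (parts == null || parts.isEmpty()) {
      return null;
    }
    // Partitioned case: fall back to the first partition's table descriptor.
    return parts.get(0).getTableDesc();
  }
}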
if (m != null) {
  for (FetchWork fw : m.values()) {
    String s = fw.getTblDir();
    if ((s != null) && ctx.isMRTmpFileURI(s)) {
      fw.setTblDir(ctx.localizeMRTmpFileURI(s));