/**
 * Return the tableDesc of the fetchWork.
 */
public TableDesc getTblDesc() {
  return work.getTblDesc();
}
public void deriveExplainAttributes() {
  if (bucketMapjoinContext != null) {
    bucketMapjoinContext.deriveBucketMapJoinMapping();
  }
  for (FetchWork fetchWork : aliasToFetchWork.values()) {
    PlanUtils.configureInputJobPropertiesForStorageHandler(
        fetchWork.getTblDesc());
  }
}
public FetchOperator(FetchWork work, JobConf job, Operator<?> operator,
    List<VirtualColumn> vcCols) throws HiveException {
  this.job = job;
  this.work = work;
  this.operator = operator;
  if (operator instanceof TableScanOperator) {
    Utilities.addTableSchemaToConf(job, (TableScanOperator) operator);
  }
  this.vcCols = vcCols;
  this.hasVC = vcCols != null && !vcCols.isEmpty();
  this.isStatReader = work.getTblDesc() == null;
  this.isPartitioned = !isStatReader && work.isPartitioned();
  this.isNonNativeTable = !isStatReader && work.getTblDesc().isNonNative();
  initialize();
}
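// Hedged usage sketch, not taken from the Hive source: one way a FetchOperator could be
// wired to a FetchWork for a non-partitioned table. The table directory, TableDesc and
// HiveConf are assumed to come from the surrounding plan; the null operator and null
// virtual-column list are tolerated by the constructor above (the instanceof and null
// checks handle them).
static FetchOperator newTableFetchOperator(Path tblDir, TableDesc tblDesc, HiveConf conf)
    throws HiveException {
  FetchWork fetchWork = new FetchWork(tblDir, tblDesc);
  return new FetchOperator(fetchWork, new JobConf(conf), null, null);
}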
/**
 * Used for bucket map join.
 */
public void setupContext(List<Path> paths) {
  this.iterPath = paths.iterator();
  if (!isPartitioned) {
    this.iterPartDesc = Iterators.cycle(new PartitionDesc(work.getTblDesc(), null));
  } else {
    this.iterPartDesc = work.getPartDescs(paths).iterator();
  }
  this.context = setupExecContext(operator, paths);
}
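// Hedged sketch, not from the Hive source: for a bucket map join the caller passes
// setupContext the bucket files of the small-table alias that match the current bucket.
// fetchOp stands for any FetchOperator (for example one built as in the sketch above);
// the file paths below are made up purely for illustration.
List<Path> bucketFiles = Arrays.asList(
    new Path("/warehouse/small_table/000000_0"),
    new Path("/warehouse/small_table/000001_0"));
fetchOp.setupContext(bucketFiles);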
public FetchWork getFetchWork() {
  // FetchWork's sink is used to hold results, so each query needs a separate copy of FetchWork
  FetchWork fetch = new FetchWork(cachedResultsPath, fetchWork.getTblDesc(), fetchWork.getLimit());
  fetch.setCachedResult(true);
  return fetch;
}
} else {
  iterPath = Arrays.asList(work.getTblDir()).iterator();
  iterPartDesc = Iterators.cycle(new PartitionDesc(work.getTblDesc(), null));
FetchWork fetchWorkForCache =
    new FetchWork(cachedResultsPath, fetchWork.getTblDesc(), fetchWork.getLimit());
fetchWorkForCache.setCachedResult(true);
cacheEntry.fetchWork = fetchWorkForCache;
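// Hedged sketch, not from the Hive source: per the comment in getFetchWork() above, the
// sink of a FetchWork accumulates fetched rows, so readers of a cache entry must not
// share cacheEntry.fetchWork directly. Assuming getFetchWork() shown earlier belongs to
// the cache entry class, each caller gets an independent copy marked as a cached result.
FetchWork fetchForThisQuery = cacheEntry.getFetchWork();   // fresh copy over cachedResultsPath
FetchWork fetchForOtherQuery = cacheEntry.getFetchWork();  // independent of the first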
private FetchWork convertToWork() throws HiveException {
  inputs.clear();
  Utilities.addSchemaEvolutionToTableScanOperator(table, scanOp);
  TableDesc tableDesc = Utilities.getTableDesc(table);
  if (!table.isPartitioned()) {
    inputs.add(new ReadEntity(table, parent, !table.isView() && parent == null));
    FetchWork work = new FetchWork(table.getPath(), tableDesc);
    PlanUtils.configureInputJobPropertiesForStorageHandler(work.getTblDesc());
    work.setSplitSample(splitSample);
    return work;
  }
  List<Path> listP = new ArrayList<Path>();
  List<PartitionDesc> partP = new ArrayList<PartitionDesc>();
  for (Partition partition : partsList.getNotDeniedPartns()) {
    inputs.add(new ReadEntity(partition, parent, parent == null));
    listP.add(partition.getDataLocation());
    partP.add(Utilities.getPartitionDescFromTableDesc(tableDesc, partition, true));
  }
  Table sourceTable = partsList.getSourceTable();
  inputs.add(new ReadEntity(sourceTable, parent, parent == null));
  TableDesc sourceTableDesc = Utilities.getTableDesc(sourceTable);
  FetchWork work = new FetchWork(listP, partP, sourceTableDesc);
  if (!work.getPartDesc().isEmpty()) {
    PartitionDesc part0 = work.getPartDesc().get(0);
    PlanUtils.configureInputJobPropertiesForStorageHandler(part0.getTableDesc());
    work.setSplitSample(splitSample);
  }
  return work;
}
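// Hedged illustration, not from the Hive source: the two FetchWork shapes that
// convertToWork() produces. tbl, partitionPaths and partitionDescs are assumed inputs.
FetchWork nonPartitioned = new FetchWork(tbl.getPath(), Utilities.getTableDesc(tbl));
FetchWork partitioned = new FetchWork(partitionPaths, partitionDescs,
    Utilities.getTableDesc(tbl));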
private StructObjectInspector setupOutputObjectInspector() throws HiveException {
  TableDesc tableDesc = work.getTblDesc();
  try {
    tableSerDe = tableDesc.getDeserializer(job, true);
    tableOI = (StructObjectInspector) tableSerDe.getObjectInspector();
    if (!isPartitioned) {
      return getTableRowOI(tableOI);
    }
    partKeyOI = getPartitionKeyOI(tableDesc);
    List<PartitionDesc> listParts = work.getPartDesc();
    // Choose the table descriptor if none of the partitions is present.
    // For example, consider the query:
    //   select /*+mapjoin(T1)*/ count(*) from T1 join T2 on T1.key=T2.key
    // Both T1 and T2 are partitioned tables, but T1 does not have any partitions.
    // FetchOperator is invoked for T1, and listParts is empty. In that case,
    // use T1's schema to get the ObjectInspector.
    if (listParts == null || listParts.isEmpty()
        || !needConversion(tableDesc, listParts)) {
      return getPartitionedRowOI(tableOI);
    }
    convertedOI = (StructObjectInspector) ObjectInspectorConverters.getConvertedOI(
        tableOI, tableOI, null, false);
    return getPartitionedRowOI(convertedOI);
  } catch (Exception e) {
    throw new HiveException("Failed with exception " + e.getMessage()
        + StringUtils.stringifyException(e));
  }
}
throw new SemanticException(ErrorMsg.PARTITION_SCAN_LIMIT_EXCEEDED,
    "" + fTask.getWork().getPartDir().size(),
    "" + fTask.getWork().getTblDesc().getTableName(),
    "" + scanLimit);
public void deriveExplainAttributes() {
  if (bucketMapjoinContext != null) {
    bucketMapjoinContext.deriveBucketMapJoinMapping();
  }
  for (FetchWork fetchWork : aliasToFetchWork.values()) {
    if (fetchWork.getTblDesc() == null) {
      continue;
    }
    PlanUtils.configureTableJobPropertiesForStorageHandler(
        fetchWork.getTblDesc());
  }
}
public FetchOperator(FetchWork work, JobConf job, Operator<?> operator,
    List<VirtualColumn> vcCols) throws HiveException {
  this.job = job;
  this.work = work;
  this.operator = operator;
  this.vcCols = vcCols;
  this.hasVC = vcCols != null && !vcCols.isEmpty();
  this.isStatReader = work.getTblDesc() == null;
  this.isPartitioned = !isStatReader && work.isPartitioned();
  this.isNonNativeTable = !isStatReader && work.getTblDesc().isNonNative();
  initialize();
}