// (fragment) tail of the iterator-setup branch: the partitioned case pairs
// partition paths with their PartitionDescs; the else case wraps the single
// table directory.
  iterPartDesc = work.getPartDesc().iterator();
} else {
  iterPath = Arrays.asList(work.getTblDir()).iterator();
private FetchWork convertToWork() throws HiveException {
  inputs.clear();
  Utilities.addSchemaEvolutionToTableScanOperator(table, scanOp);
  TableDesc tableDesc = Utilities.getTableDesc(table);
  if (!table.isPartitioned()) {
    inputs.add(new ReadEntity(table, parent, !table.isView() && parent == null));
    FetchWork work = new FetchWork(table.getPath(), tableDesc);
    PlanUtils.configureInputJobPropertiesForStorageHandler(work.getTblDesc());
    work.setSplitSample(splitSample);
    return work;
  }
  List<Path> listP = new ArrayList<Path>();
  List<PartitionDesc> partP = new ArrayList<PartitionDesc>();
  for (Partition partition : partsList.getNotDeniedPartns()) {
    inputs.add(new ReadEntity(partition, parent, parent == null));
    listP.add(partition.getDataLocation());
    partP.add(Utilities.getPartitionDescFromTableDesc(tableDesc, partition, true));
  }
  Table sourceTable = partsList.getSourceTable();
  inputs.add(new ReadEntity(sourceTable, parent, parent == null));
  // Renamed from 'table' to avoid shadowing the Table field used above.
  TableDesc sourceTableDesc = Utilities.getTableDesc(sourceTable);
  FetchWork work = new FetchWork(listP, partP, sourceTableDesc);
  if (!work.getPartDesc().isEmpty()) {
    PartitionDesc part0 = work.getPartDesc().get(0);
    PlanUtils.configureInputJobPropertiesForStorageHandler(part0.getTableDesc());
    work.setSplitSample(splitSample);
  }
  return work;
}
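The partitioned branch builds two parallel lists: partition paths in listP and their PartitionDescs in partP, at matching indices. Downstream, FetchOperator walks them in lockstep. A minimal sketch of that pairing, assuming the iterPath/iterPartDesc fields seen in the fragments in this section:

// Hypothetical sketch (not Hive source): consuming the parallel lists in
// lockstep, mirroring the iterPath/iterPartDesc fragments shown elsewhere
// in this section. Assumes getPartDir() exposes the path list as List<Path>.
Iterator<Path> iterPath = work.getPartDir().iterator();
Iterator<PartitionDesc> iterPartDesc = work.getPartDesc().iterator();
while (iterPath.hasNext()) {
  Path dir = iterPath.next();
  PartitionDesc desc = iterPartDesc.next(); // same index as 'dir'
  // ... open 'dir' with the input format and deserializer from 'desc' ...
}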
if (td == null && ft.getWork() != null && ft.getWork().getPartDesc() != null) {
  if (ft.getWork().getPartDesc().size() > 0) {
    td = ft.getWork().getPartDesc().get(0).getTableDesc();
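The fallback is easy to lose in the nested conditions: when no TableDesc was resolved, borrow the descriptor of the first partition. A hedged sketch of the same logic as a helper, assuming ft is a FetchTask (the helper name is made up):

// Hypothetical helper (not Hive source) capturing the fallback above:
// prefer the resolved TableDesc, else borrow the first partition's.
private static TableDesc resolveTableDesc(TableDesc td, FetchTask ft) {
  if (td == null && ft.getWork() != null && ft.getWork().getPartDesc() != null
      && ft.getWork().getPartDesc().size() > 0) {
    return ft.getWork().getPartDesc().get(0).getTableDesc();
  }
  return td;
}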
private StructObjectInspector setupOutputObjectInspector() throws HiveException {
  TableDesc tableDesc = work.getTblDesc();
  try {
    tableSerDe = tableDesc.getDeserializer(job, true);
    tableOI = (StructObjectInspector) tableSerDe.getObjectInspector();
    if (!isPartitioned) {
      return getTableRowOI(tableOI);
    }
    partKeyOI = getPartitionKeyOI(tableDesc);
    PartitionDesc partDesc = new PartitionDesc(tableDesc, null);
    List<PartitionDesc> listParts = work.getPartDesc();
    // Choose the table descriptor if no partitions are present.
    // For example, consider the query:
    //   select /*+mapjoin(T1)*/ count(*) from T1 join T2 on T1.key=T2.key
    // Both T1 and T2 are partitioned tables, but T1 does not have any
    // partitions. FetchOperator is invoked for T1, and listParts is empty.
    // In that case, use T1's schema to get the ObjectInspector.
    if (listParts == null || listParts.isEmpty() || !needConversion(tableDesc, listParts)) {
      return getPartitionedRowOI(tableOI);
    }
    convertedOI = (StructObjectInspector) ObjectInspectorConverters.getConvertedOI(
        tableOI, tableOI, null, false);
    return getPartitionedRowOI(convertedOI);
  } catch (Exception e) {
    throw new HiveException("Failed with exception " + e.getMessage()
        + StringUtils.stringifyException(e));
  }
}
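When a partition's on-disk schema differs from the table's, rows deserialized with the partition's ObjectInspector must be converted into convertedOI before being emitted. A minimal sketch of that step with the standard converter API; partRawRowOI and rawRow are hypothetical stand-ins:

// Hypothetical sketch (not Hive source): converting a raw partition row
// into the converted table OI built above. getConverter() is standard
// Hive serde2 API; 'partRawRowOI' and 'rawRow' are made-up names.
ObjectInspectorConverters.Converter converter =
    ObjectInspectorConverters.getConverter(partRawRowOI, convertedOI);
Object convertedRow = converter.convert(rawRow);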
/**
 * Used for bucket map join. There is a hack for getting the PartitionDesc:
 * bucket map join currently allows only one partition to be present.
 */
public void setupContext(Iterator<Path> iterPath, Iterator<PartitionDesc> iterPartDesc) {
  this.iterPath = iterPath;
  this.iterPartDesc = iterPartDesc;
  if (iterPartDesc == null) {
    if (work.getTblDir() != null) {
      this.currTbl = work.getTblDesc();
    } else {
      // Hack: take the first (and only) partition.
      List<PartitionDesc> listParts = work.getPartDesc();
      currPart = listParts.get(0);
    }
  }
}
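A minimal caller sketch under the single-partition constraint above; the operator instance and bucket path are hypothetical, only the setupContext signature and null handling come from the method itself:

// Hypothetical sketch (not Hive source): driving setupContext() for a
// bucket map join touching exactly one partition. 'fetchOp' and the
// bucket path are made-up names.
Path bucketPath = new Path("/warehouse/t1/ds=2024-01-01/bucket_00000");
fetchOp.setupContext(Arrays.asList(bucketPath).iterator(), null);
// With a null PartitionDesc iterator, setupContext() falls back to the
// first entry of work.getPartDesc() (the "hack" noted in the javadoc).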
// (fragment) variant where getPartDir() returns strings that must be
// converted to Paths before iteration.
} else {
  iterPath = FetchWork.convertStringToPathArray(work.getPartDir()).iterator();
  iterPartDesc = work.getPartDesc().iterator();
// Variant of convertToWork() without the schema-evolution setup; builds
// PartitionDescs directly from each Partition.
private FetchWork convertToWork() throws HiveException {
  inputs.clear();
  if (!table.isPartitioned()) {
    inputs.add(new ReadEntity(table, parent, !table.isView() && parent == null));
    FetchWork work = new FetchWork(table.getPath(), Utilities.getTableDesc(table));
    PlanUtils.configureInputJobPropertiesForStorageHandler(work.getTblDesc());
    work.setSplitSample(splitSample);
    return work;
  }
  List<Path> listP = new ArrayList<Path>();
  List<PartitionDesc> partP = new ArrayList<PartitionDesc>();
  for (Partition partition : partsList.getNotDeniedPartns()) {
    inputs.add(new ReadEntity(partition, parent, parent == null));
    listP.add(partition.getDataLocation());
    partP.add(Utilities.getPartitionDesc(partition));
  }
  Table sourceTable = partsList.getSourceTable();
  inputs.add(new ReadEntity(sourceTable, parent, parent == null));
  // Renamed from 'table' to avoid shadowing the Table field used above.
  TableDesc sourceTableDesc = Utilities.getTableDesc(sourceTable);
  FetchWork work = new FetchWork(listP, partP, sourceTableDesc);
  if (!work.getPartDesc().isEmpty()) {
    PartitionDesc part0 = work.getPartDesc().get(0);
    PlanUtils.configureInputJobPropertiesForStorageHandler(part0.getTableDesc());
    work.setSplitSample(splitSample);
  }
  return work;
}
public ObjectInspector getOutputObjectInspector() throws HiveException {
  try {
    if (work.getTblDir() != null) {
      TableDesc tbl = work.getTblDesc();
      Deserializer serde = tbl.getDeserializerClass().newInstance();
      serde.initialize(job, tbl.getProperties());
      return serde.getObjectInspector();
    } else if (work.getPartDesc() != null) {
      List<PartitionDesc> listParts = work.getPartDesc();
      if (listParts.size() == 0) {
        return null;
      }
      currPart = listParts.get(0);
      // 'serde' below appears to be the operator's Deserializer field (the
      // local above shadows it); setPrtnDesc() presumably reads it to build
      // rowObjectInspector.
      serde = currPart.getTableDesc().getDeserializerClass().newInstance();
      serde.initialize(job, currPart.getTableDesc().getProperties());
      setPrtnDesc();
      currPart = null;
      return rowObjectInspector;
    } else {
      return null;
    }
  } catch (Exception e) {
    throw new HiveException("Failed with exception " + e.getMessage()
        + org.apache.hadoop.util.StringUtils.stringifyException(e));
  }
}
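Both branches use the older reflective pattern: instantiate the deserializer class, then initialize it with the job configuration and the descriptor's properties. For contrast, setupOutputObjectInspector() above gets the same result through a single helper; a side-by-side sketch reusing names from the surrounding snippets:

// Sketch contrasting the two deserializer-creation styles seen in this
// section; both forms are lifted from the snippets above, not new API.
Deserializer oldStyle = tbl.getDeserializerClass().newInstance();
oldStyle.initialize(job, tbl.getProperties());

Deserializer newStyle = tableDesc.getDeserializer(job, true);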