@SuppressWarnings("nls") public static MapredWork getMapRedWork() { return new MapredWork(); }
/**
 * Create a new plan and return it. The plan won't contain the name-to-split-sample
 * information from the parse context.
 *
 * @return the new plan
 */
public static MapredWork getMapRedWorkFromConf(HiveConf conf) {
  MapredWork mrWork = new MapredWork();
  MapWork work = mrWork.getMapWork();

  boolean mapperCannotSpanPartns = conf.getBoolVar(
      HiveConf.ConfVars.HIVE_MAPPER_CANNOT_SPAN_MULTIPLE_PARTITIONS);
  work.setMapperCannotSpanPartns(mapperCannotSpanPartns);
  work.setPathToAliases(new LinkedHashMap<Path, ArrayList<String>>());
  work.setPathToPartitionInfo(new LinkedHashMap<Path, PartitionDesc>());
  work.setAliasToWork(new LinkedHashMap<String, Operator<? extends OperatorDesc>>());
  return mrWork;
}
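A caller would typically take the empty plan returned by this helper and register an input path and alias on its MapWork. The fragment below is a minimal sketch of that pattern, assuming a call from within the same class; the path and alias names are hypothetical placeholders, not taken from the original code.

// Hypothetical caller: obtain the empty plan and register one input.
MapredWork plan = getMapRedWorkFromConf(conf);
MapWork mapWork = plan.getMapWork();

Path input = new Path("/tmp/example_input");   // illustrative path, not from the original
ArrayList<String> aliases = new ArrayList<>();
aliases.add("example_alias");                  // illustrative alias

mapWork.getPathToAliases().put(input, aliases);
mapWork.getPathToPartitionInfo().put(input, new PartitionDesc(Utilities.defaultTd, null));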
public static MapredWork getMapRedWork(Configuration conf) {
  MapredWork w = new MapredWork();
  w.setMapWork(getMapWork(conf));
  w.setReduceWork(getReduceWork(conf));
  return w;
}
@Override
protected void setUp() throws IOException {
  conf = new Configuration();
  job = new JobConf(conf);

  TableDesc tblDesc = Utilities.defaultTd;
  PartitionDesc partDesc = new PartitionDesc(tblDesc, null);
  LinkedHashMap<Path, PartitionDesc> pt = new LinkedHashMap<>();
  pt.put(new Path("/tmp/testfolder"), partDesc);
  MapredWork mrwork = new MapredWork();
  mrwork.getMapWork().setPathToPartitionInfo(pt);
  Utilities.setMapRedWork(job, mrwork,
      new Path("/tmp/" + System.getProperty("user.name"), "hive"));

  fileSystem = FileSystem.getLocal(conf);
  testDir = new Path(System.getProperty("test.tmp.dir",
      System.getProperty("user.dir", new File(".").getAbsolutePath()))
      + "/TestSymlinkTextInputFormat");
  reporter = Reporter.NULL;
  fileSystem.delete(testDir, true);

  dataDir1 = new Path(testDir, "datadir1");
  dataDir2 = new Path(testDir, "datadir2");
  symlinkDir = new Path(testDir, "symlinkdir");
}
public void testAvoidSplitCombination() throws Exception {
  Configuration conf = new Configuration();
  JobConf job = new JobConf(conf);

  TableDesc tblDesc = Utilities.defaultTd;
  tblDesc.setInputFileFormatClass(TestSkipCombineInputFormat.class);
  PartitionDesc partDesc = new PartitionDesc(tblDesc, null);
  LinkedHashMap<Path, PartitionDesc> pt = new LinkedHashMap<>();
  pt.put(new Path("/tmp/testfolder1"), partDesc);
  pt.put(new Path("/tmp/testfolder2"), partDesc);
  MapredWork mrwork = new MapredWork();
  mrwork.getMapWork().setPathToPartitionInfo(pt);
  Path mapWorkPath = new Path("/tmp/" + System.getProperty("user.name"), "hive");
  Utilities.setMapRedWork(conf, mrwork, mapWorkPath);

  try {
    Path[] paths = new Path[2];
    paths[0] = new Path("/tmp/testfolder1");
    paths[1] = new Path("/tmp/testfolder2");

    CombineHiveInputFormat combineInputFormat =
        ReflectionUtils.newInstance(CombineHiveInputFormat.class, conf);
    combineInputFormat.pathToPartitionInfo =
        Utilities.getMapWork(conf).getPathToPartitionInfo();
    Set results = combineInputFormat.getNonCombinablePathIndices(job, paths, 2);
    assertEquals("Should have both path indices in the results set", 2, results.size());
  } finally {
    // Clean up the map work path.
    FileSystem.get(conf).delete(mapWorkPath, true);
  }
}
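The tests above rely on Utilities.setMapRedWork writing the plan under a scratch directory recorded in the Configuration, and Utilities.getMapWork reading the map-side plan back from that same Configuration. A minimal sketch of the round trip, with a placeholder scratch path:

MapredWork mrWork = new MapredWork();
mrWork.getMapWork().setPathToPartitionInfo(new LinkedHashMap<Path, PartitionDesc>());

// Persist the plan; the scratch path below is only an example.
Path scratchDir = new Path("/tmp/" + System.getProperty("user.name"), "hive");
Utilities.setMapRedWork(conf, mrWork, scratchDir);

// Later, e.g. on the task side, the map-side plan can be recovered from the same conf.
MapWork recovered = Utilities.getMapWork(conf);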
@Test
public void mrTaskSumbitViaChildWithImpersonation() throws IOException, LoginException {
  Utils.getUGI().setAuthenticationMethod(PROXY);

  Context ctx = Mockito.mock(Context.class);
  when(ctx.getLocalTmpPath()).thenReturn(new Path(System.getProperty("java.io.tmpdir")));
  DriverContext dctx = new DriverContext(ctx);

  QueryState queryState = new QueryState.Builder().build();
  HiveConf conf = queryState.getConf();
  conf.setBoolVar(HiveConf.ConfVars.SUBMITVIACHILD, true);

  MapredWork mrWork = new MapredWork();
  mrWork.setMapWork(Mockito.mock(MapWork.class));

  MapRedTask mrTask = Mockito.spy(new MapRedTask());
  mrTask.setWork(mrWork);
  mrTask.initialize(queryState, null, dctx, null);
  mrTask.jobExecHelper = Mockito.mock(HadoopJobExecHelper.class);
  when(mrTask.jobExecHelper.progressLocal(Mockito.any(Process.class), Mockito.anyString()))
      .thenReturn(0);

  mrTask.execute(dctx);

  ArgumentCaptor<String[]> captor = ArgumentCaptor.forClass(String[].class);
  verify(mrTask).spawn(Mockito.anyString(), Mockito.anyString(), captor.capture());
  String expected = "HADOOP_PROXY_USER=" + Utils.getUGI().getUserName();
  Assert.assertTrue(Arrays.asList(captor.getValue()).contains(expected));
}
private void init() throws IOException {
  conf = new JobConf();
  resetIOContext();

  rcfReader = mock(RCFileRecordReader.class);
  when(rcfReader.next((LongWritable) anyObject(),
      (BytesRefArrayWritable) anyObject())).thenReturn(true);
  // Since the start is 0 and the length is 100, the first call to sync should be with the
  // value 50, so return that for getPos().
  when(rcfReader.getPos()).thenReturn(50L);

  conf.setBoolean("hive.input.format.sorted", true);

  TableDesc tblDesc = Utilities.defaultTd;
  PartitionDesc partDesc = new PartitionDesc(tblDesc, null);
  LinkedHashMap<Path, PartitionDesc> pt = new LinkedHashMap<>();
  pt.put(new Path("/tmp/testfolder"), partDesc);
  MapredWork mrwork = new MapredWork();
  mrwork.getMapWork().setPathToPartitionInfo(pt);
  Utilities.setMapRedWork(conf, mrwork,
      new Path("/tmp/" + System.getProperty("user.name"), "hive"));

  hiveSplit = new TestHiveInputSplit();
  hbsReader = new TestHiveRecordReader(rcfReader, conf);
  hbsReader.initIOContext(hiveSplit, conf, Class.class, rcfReader);
}
newPlan.setInputformat(HiveInputFormat.class.getName());

MapredWork w = new MapredWork();
w.setMapWork(newPlan);
ao.put("a", op); MapredWork mrwork = new MapredWork(); mrwork.getMapWork().setPathToAliases(pa); mrwork.getMapWork().setPathToPartitionInfo(pt);
MapredWork mrwork = new MapredWork();
mrwork.getMapWork().setPathToAliases(pathToAliases);
mrwork.getMapWork().setPathToPartitionInfo(pathToPartitionInfo);
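Both maps are keyed by input Path: pathToAliases lists the query aliases that read each path, while pathToPartitionInfo carries the PartitionDesc used to deserialize rows from it. A small sketch of how such maps might be assembled before being handed to the setters above; the path and alias values are illustrative assumptions, not from the original code.

LinkedHashMap<Path, ArrayList<String>> pathToAliases = new LinkedHashMap<>();
LinkedHashMap<Path, PartitionDesc> pathToPartitionInfo = new LinkedHashMap<>();

Path input = new Path("/tmp/example_input");   // hypothetical input path
ArrayList<String> aliasList = new ArrayList<>();
aliasList.add("a");                             // the alias that reads this path
pathToAliases.put(input, aliasList);
pathToPartitionInfo.put(input, new PartitionDesc(Utilities.defaultTd, null));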
addInputPaths(job, work);

MapredWork mrWork = new MapredWork();
mrWork.setMapWork(work);
Utilities.setMapRedWork(job, mrWork, ctx.getMRTmpPath());
  ((SparkWork) work).add(cplan);
} else {
  work = new MapredWork();
  ((MapredWork) work).setMapWork(cplan);
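This fragment comes from a code path that wraps the same MapWork (cplan) in either a SparkWork or a MapredWork depending on the execution engine. The surrounding branch is reconstructed below as a hedged sketch: the engine lookup, the SparkWork constructor argument, and the variable declarations are assumptions, not taken from the original.

// Hedged reconstruction of the surrounding branch.
Serializable work;
if ("spark".equals(conf.get("hive.execution.engine"))) {
  // Assumed: SparkWork is named after the current query id.
  work = new SparkWork(conf.getVar(HiveConf.ConfVars.HIVEQUERYID));
  ((SparkWork) work).add(cplan);
} else {
  work = new MapredWork();
  ((MapredWork) work).setMapWork(cplan);
}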