// Fragment (the enclosing method and the merge into finalTmpJars continue
// past this excerpt): snapshots the jars currently in MR_JAR_PROPERTY,
// clears the property, then lets every BaseWork in the work graph register
// its own jars via configureJobConf.
// NOTE(review): getStrings returns null when the key is unset, hence the
// null checks on both the old and new arrays.
String[] oldTmpJars = jobConf.getStrings(MR_JAR_PROPERTY); jobConf.setStrings(MR_JAR_PROPERTY, new String[0]); for (BaseWork work : workGraph.keySet()) { work.configureJobConf(jobConf); String[] newTmpJars = jobConf.getStrings(MR_JAR_PROPERTY); if (oldTmpJars != null || newTmpJars != null) { String[] finalTmpJars;
// Fragment (the enclosing method and the merge into finalTmpJars continue
// past this excerpt): snapshots the jars currently in MR_JAR_PROPERTY,
// clears the property, then lets every BaseWork in the work graph register
// its own jars via configureJobConf.
// NOTE(review): getStrings returns null when the key is unset, hence the
// null checks on both the old and new arrays.
String[] oldTmpJars = jobConf.getStrings(MR_JAR_PROPERTY); jobConf.setStrings(MR_JAR_PROPERTY, new String[0]); for (BaseWork work : workGraph.keySet()) { work.configureJobConf(jobConf); String[] newTmpJars = jobConf.getStrings(MR_JAR_PROPERTY); if (oldTmpJars != null || newTmpJars != null) { String[] finalTmpJars;
private static void setupTetherJob(JobConf job) throws IOException { job.setMapRunnerClass(TetherMapRunner.class); job.setPartitionerClass(TetherPartitioner.class); job.setReducerClass(TetherReducer.class); job.setInputFormat(TetherInputFormat.class); job.setOutputFormat(TetherOutputFormat.class); job.setOutputKeyClass(TetherData.class); job.setOutputKeyComparatorClass(TetherKeyComparator.class); job.setMapOutputValueClass(NullWritable.class); // set the map output key class to TetherData job.setMapOutputKeyClass(TetherData.class); // if protocol isn't set if (job.getStrings(TETHER_PROTOCOL)==null) { job.set(TETHER_PROTOCOL, "sasl"); } // add TetherKeySerialization to io.serializations Collection<String> serializations = job.getStringCollection("io.serializations"); if (!serializations.contains(TetherKeySerialization.class.getName())) { serializations.add(TetherKeySerialization.class.getName()); job.setStrings("io.serializations", serializations.toArray(new String[0])); } // determine whether the executable should be added to the cache. if (job.getBoolean(TETHER_EXEC_CACHED,false)){ DistributedCache.addCacheFile(getExecutable(job), job); } }
// Fragment (loop closes past this excerpt): resolves the first PARTITIONS
// entries of mapred.input.dir into Path objects stored in partDir.
// NOTE(review): assumes paths is non-null and has at least PARTITIONS
// entries — TODO confirm at the call site.
String[] paths = conf.getStrings("mapred.input.dir"); for(int p=0; p < PARTITIONS; ++p) { partDir[p] = new Path(paths[p]);
/**
 * Returns the directories configured under "mapred.local.dir".
 *
 * @return the configured local directories, split on commas
 * @throws IOException declared for API compatibility; not thrown by this
 *     implementation
 */
public String[] getLocalDirs() throws IOException {
  final String[] localDirs = getStrings("mapred.local.dir");
  return localDirs;
}
/**
 * Returns the field names configured for database input
 * ({@code DBConfiguration.INPUT_FIELD_NAMES_PROPERTY}); may be null when the
 * property is unset.
 */
String[] getInputFieldNames() {
  final String[] inputFieldNames =
      job.getStrings(DBConfiguration.INPUT_FIELD_NAMES_PROPERTY);
  return inputFieldNames;
}
/**
 * Returns the field names configured for database output
 * ({@code DBConfiguration.OUTPUT_FIELD_NAMES_PROPERTY}); may be null when the
 * property is unset.
 */
String[] getOutputFieldNames() {
  final String[] outputFieldNames =
      job.getStrings(DBConfiguration.OUTPUT_FIELD_NAMES_PROPERTY);
  return outputFieldNames;
}
/**
 * Returns the field names configured for database input
 * ({@code DBConfiguration.INPUT_FIELD_NAMES_PROPERTY}); may be null when the
 * property is unset.
 */
String[] getInputFieldNames() {
  final String[] inputFieldNames =
      job.getStrings(DBConfiguration.INPUT_FIELD_NAMES_PROPERTY);
  return inputFieldNames;
}
/**
 * Returns the field names configured for database output
 * ({@code DBConfiguration.OUTPUT_FIELD_NAMES_PROPERTY}); may be null when the
 * property is unset.
 */
String[] getOutputFieldNames() {
  final String[] outputFieldNames =
      job.getStrings(DBConfiguration.OUTPUT_FIELD_NAMES_PROPERTY);
  return outputFieldNames;
}
/**
 * Returns the directories configured under "mapred.local.dir".
 *
 * @return the configured local directories, split on commas
 * @throws IOException declared for API compatibility; not thrown by this
 *     implementation
 */
public String[] getLocalDirs() throws IOException {
  final String[] localDirs = getStrings("mapred.local.dir");
  return localDirs;
}
/**
 * Returns the values configured under {@code KAFKA_MOVE_TO_LAST_OFFSET_LIST};
 * may be null when the property is unset.
 *
 * @param job the job configuration to read from
 */
public static String[] getMoveToLatestTopics(JobConf job) {
  final String[] topics = job.getStrings(KAFKA_MOVE_TO_LAST_OFFSET_LIST);
  return topics;
}
/** * * @param key * @return Array of strings attached configured for the given key. Different * from Hadoop's default getStrings(), this method returns an array length 0 * when the key does not exist, and substitutes nested variables (e.g. * ${name}) in the values. */ @Override public String[] getStrings(String key) { String values[] = super.getStrings(key); if (values == null) { return new String[0]; } //for (int i = 0; i < values.length; i++) { // values[i] = substituteString(values[i]); //} return values; }
// Test fragment (the receiver of getStrings is outside this excerpt):
// verifies that the input-field-names property round-trips as
// {"field1", "field2"}.
.getStrings(DBConfiguration.INPUT_FIELD_NAMES_PROPERTY); assertEquals("field1", fields[0]); assertEquals("field2", fields[1]);
/**
 * Returns the Kafka whitelist topics configured under
 * {@code KAFKA_WHITELIST_TOPIC}.
 *
 * @param job the job configuration to read from
 * @return the comma-separated topics as an array, or an empty array when the
 *     property is unset or empty
 */
public static String[] getKafkaWhitelistTopic(JobConf job) {
  // Read the raw property once; the original performed two identical
  // job.get() lookups just to evaluate the null/empty guard.
  String rawTopics = job.get(KAFKA_WHITELIST_TOPIC);
  if (rawTopics == null || rawTopics.isEmpty()) {
    return new String[0];
  }
  return job.getStrings(KAFKA_WHITELIST_TOPIC);
}
/**
 * Returns the Kafka blacklist topics configured under
 * {@code KAFKA_BLACKLIST_TOPIC}.
 *
 * @param job the job configuration to read from
 * @return the comma-separated topics as an array, or an empty array when the
 *     property is unset or empty
 */
public static String[] getKafkaBlacklistTopic(JobConf job) {
  // Read the raw property once; the original performed two identical
  // job.get() lookups just to evaluate the null/empty guard.
  String rawTopics = job.get(KAFKA_BLACKLIST_TOPIC);
  if (rawTopics == null || rawTopics.isEmpty()) {
    return new String[0];
  }
  return job.getStrings(KAFKA_BLACKLIST_TOPIC);
}
// Fragment (the enclosing method and the merge into finalTmpJars continue
// past this excerpt): snapshots the jars currently in MR_JAR_PROPERTY,
// clears the property, then lets every BaseWork in the work graph register
// its own jars via configureJobConf.
// NOTE(review): getStrings returns null when the key is unset, hence the
// null checks on both the old and new arrays.
String[] oldTmpJars = jobConf.getStrings(MR_JAR_PROPERTY); jobConf.setStrings(MR_JAR_PROPERTY, new String[0]); for (BaseWork work : workGraph.keySet()) { work.configureJobConf(jobConf); String[] newTmpJars = jobConf.getStrings(MR_JAR_PROPERTY); if (oldTmpJars != null || newTmpJars != null) { String[] finalTmpJars;
// Fragment (both branches continue outside this excerpt): in the else branch,
// reads the SCHEMA_URL list and, when present, starts building a
// name-to-Schema map (its population happens past this view).
return; } else { String[] schemaUrls = job.getStrings(SCHEMA_URL); if (schemaUrls != null) { HashMap<String, Schema> hmap = new HashMap<String, Schema>();
// Test fragment (the enclosing loop header is outside this excerpt): asserts
// that each configured MRConfig.LOCAL_DIR entry ends with the task-local
// directory path, i.e. that child local dirs are sandboxed per task.
.getStrings(MRConfig.LOCAL_DIR)) { assertTrue("Local dir " + childMapredLocalDir + " is not sandboxed !!", childMapredLocalDir.endsWith(TaskTracker.getLocalTaskDir(task
private static void setupTetherJob(JobConf job) throws IOException { job.setMapRunnerClass(TetherMapRunner.class); job.setPartitionerClass(TetherPartitioner.class); job.setReducerClass(TetherReducer.class); job.setInputFormat(TetherInputFormat.class); job.setOutputFormat(TetherOutputFormat.class); job.setOutputKeyClass(TetherData.class); job.setOutputKeyComparatorClass(TetherKeyComparator.class); job.setMapOutputValueClass(NullWritable.class); // set the map output key class to TetherData job.setMapOutputKeyClass(TetherData.class); // if protocol isn't set if (job.getStrings(TETHER_PROTOCOL)==null) { job.set(TETHER_PROTOCOL, "sasl"); } // add TetherKeySerialization to io.serializations Collection<String> serializations = job.getStringCollection("io.serializations"); if (!serializations.contains(TetherKeySerialization.class.getName())) { serializations.add(TetherKeySerialization.class.getName()); job.setStrings("io.serializations", serializations.toArray(new String[0])); } // determine whether the executable should be added to the cache. if (job.getBoolean(TETHER_EXEC_CACHED,false)){ DistributedCache.addCacheFile(getExecutable(job), job); } }
// Fragment (loop body continues past this excerpt): for every configured
// local dir, builds the per-attempt directory File under the task-local
// path for this user and job.
for (String dir : trackerFConf.getStrings(MRConfig.LOCAL_DIR)) { File attemptDir = new File(dir, TaskTracker.getLocalTaskDir(task.getUser(), jobId