/**
 * Escapes commas in {@code str} using the default escape character.
 *
 * @param str the string to escape
 * @return the escaped string
 */
public static String escapeString(String str) {
  return escapeString(str, ESCAPE_CHAR, COMMA);
}
/**
 * Escapes every occurrence of {@code charToEscape} in {@code str} with
 * {@code escapeChar}, delegating to the array-based overload.
 *
 * @param str the string to escape
 * @param escapeChar the escape character to insert before each occurrence
 * @param charToEscape the character to be escaped
 * @return the escaped string
 */
public static String escapeString(String str, char escapeChar, char charToEscape) {
  char[] targets = {charToEscape};
  return escapeString(str, escapeChar, targets);
}
/**
 * Joins the given directories into one comma-separated input string,
 * escaping commas inside each path via {@link StringUtils#escapeString}.
 *
 * @param dirs input directories; may be null or empty
 * @return comma-separated escaped path list, or "" when there are no dirs
 */
private String makeInputString(List<Path> dirs) {
  if (dirs == null || dirs.isEmpty()) {
    return "";
  }
  // StringBuilder instead of StringBuffer: the buffer is method-local, so the
  // synchronization StringBuffer provides is pure overhead.
  StringBuilder str = new StringBuilder(StringUtils.escapeString(dirs.get(0).toString()));
  for (int i = 1; i < dirs.size(); i++) {
    str.append(",").append(StringUtils.escapeString(dirs.get(i).toString()));
  }
  return str.toString();
}

private ValidWriteIdList extractValidWriteIdList() {
/**
 * Joins the qualified paths of the given statuses into one comma-separated
 * String, escaping commas inside each path.
 *
 * @param fs file system used to qualify each path
 * @param statuses statuses whose paths are joined
 * @return comma-separated escaped path string ("" for an empty array)
 * @see FileInputFormat#addInputPath(org.apache.hadoop.mapreduce.Job, Path)
 */
private static String joinFSPaths(FileSystem fs, FileStatus[] statuses) {
  StringBuilder out = new StringBuilder();
  for (FileStatus st : statuses) {
    Path qualified = fs.makeQualified(st.getPath());
    if (out.length() != 0) {
      out.append(',');
    }
    out.append(StringUtils.escapeString(qualified.toString()));
  }
  return out.toString();
}
/**
 * Takes an array of strings and encodes it into one string.
 * Null elements are encoded as empty strings.
 *
 * @param plain array to encode; may be null
 * @return the encoded string, or null if {@code plain} is null
 */
public static String encodeArray(String[] plain) {
  if (plain == null) {
    return null;
  }
  String[] escaped = new String[plain.length];
  for (int i = 0; i < plain.length; ++i) {
    // The original wrote "" back into the caller's array (plain[i] = "");
    // encoding should not mutate its input, so use a local value instead.
    String value = (plain[i] == null) ? "" : plain[i];
    escaped[i] = StringUtils.escapeString(value);
  }
  return StringUtils.arrayToString(escaped);
}
.append(StringUtils.escapeString(qualifiedPath)); separator = StringUtils.COMMA_STR;
String escapedQuery = StringUtils.escapeString(query, ESCAPE_CHAR, escapedChars); String sql = String.format(SPLIT_QUERY, escapedQuery, numSplits); try {
for(String whProp : webhcatHiveProps) { hiveProps.append(hiveProps.length() > 0 ? "," : "").append(StringUtils.escapeString(whProp));
job.set("mapred.input.dir", StringUtils.escapeString(currPath.toString()));
/**
 * Iterates over the remaining input paths and returns the splits of the
 * first path that yields at least one split; paths with no splits are
 * skipped. Mutates {@code job} (input dir, vectorization flag, table
 * properties) as a side effect on every iteration.
 *
 * @return splits (possibly sample-filtered) for the next non-empty path,
 *         or null when no paths remain
 * @throws Exception if split computation fails
 */
protected FetchInputFormatSplit[] getNextSplits() throws Exception {
  while (getNextPath()) {
    // not using FileInputFormat.setInputPaths() here because it forces a connection to the
    // default file system - which may or may not be online during pure metadata operations
    job.set("mapred.input.dir", StringUtils.escapeString(currPath.toString()));

    // Fetch operator is not vectorized and as such turn vectorization flag off so that
    // non-vectorized record reader is created below.
    HiveConf.setBoolVar(job, HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED, false);

    // Table-level job properties must be copied in before the input format is
    // instantiated, since getInputFormatFromCache configures it from the job.
    Class<? extends InputFormat> formatter = currDesc.getInputFileFormatClass();
    Utilities.copyTableJobPropertiesToConf(currDesc.getTableDesc(), job);
    InputFormat inputFormat = getInputFormatFromCache(formatter, job);
    InputSplit[] splits = inputFormat.getSplits(job, 1);

    // Wrap each raw split with the input format that produced it.
    FetchInputFormatSplit[] inputSplits = new FetchInputFormatSplit[splits.length];
    for (int i = 0; i < splits.length; i++) {
      inputSplits[i] = new FetchInputFormatSplit(splits[i], inputFormat);
    }
    // Optional split sampling may shrink the split set.
    if (work.getSplitSample() != null) {
      inputSplits = splitSampling(work.getSplitSample(), inputSplits);
    }
    // An empty result (e.g. empty path) falls through to the next path.
    if (inputSplits.length > 0) {
      return inputSplits;
    }
  }
  return null;
}
org.apache.hadoop.util.StringUtils.escapeString(parentDir.getAbsolutePath())); inputSplits = inputFormat.getSplits(localJc, 1); actualSplitNum = inputSplits.length;
org.apache.hadoop.util.StringUtils.escapeString(parentDir.getAbsolutePath())); inputSplits = inputFormat.getSplits(localJc, 1); actualSplitNum = inputSplits.length;
/**
 * Appends a "key=value" entry to {@code fields}, backslash-escaping the
 * field separators (',' and '=') in both the key and the value.
 *
 * @param fields list to append to
 * @param name field name
 * @param value field value; converted with String.valueOf (null becomes "null")
 */
private void addField(List<String> fields, String name, Object value) {
  // Single definition of the delimiter set; the original duplicated the
  // char[] literal for key and value.
  final char[] toEscape = {',', '='};
  String key = StringUtils.escapeString(name, '\\', toEscape);
  String val = StringUtils.escapeString(String.valueOf(value), '\\', toEscape);
  fields.add(key + '=' + val);
}
/**
 * Escapes commas in the given string with the default escape character,
 * delegating to the three-argument overload.
 *
 * @param str a string
 * @return an escaped string
 */
public static String escapeString(String str) {
  return escapeString(str, ESCAPE_CHAR, COMMA);
}
/**
 * Appends a "key=value" entry to {@code fields}, backslash-escaping the
 * field separators (',' and '=') in both the key and the value.
 *
 * @param fields list to append to
 * @param name field name
 * @param value field value; converted with String.valueOf (null becomes "null")
 */
private void addField(List<String> fields, String name, Object value) {
  // Deduplicate the escape-character set that was repeated inline twice.
  final char[] toEscape = {',', '='};
  String key = StringUtils.escapeString(name, '\\', toEscape);
  String val = StringUtils.escapeString(String.valueOf(value), '\\', toEscape);
  fields.add(key + '=' + val);
}
/**
 * Qualifies {@code path} against its file system and appends it to the
 * comma-separated input-directory list in the configuration.
 *
 * @param conf configuration holding the input-directory list
 * @param path input path to append
 * @throws IOException if the path's file system cannot be resolved
 */
public static void addInputPath(Configuration conf, Path path) throws IOException {
  Path qualified = path.getFileSystem(conf).makeQualified(path);
  String escaped = org.apache.hadoop.util.StringUtils.escapeString(qualified.toString());
  String existing = conf.get(GuaguaYarnConstants.GUAGUA_YARN_INPUT_DIR);
  String updated = (existing == null) ? escaped : existing + "," + escaped;
  conf.set(GuaguaYarnConstants.GUAGUA_YARN_INPUT_DIR, updated);
}
/**
 * Resolves {@code path} against the configured working directory and
 * appends it to the comma-separated "mapred.input.dir" list.
 *
 * @param conf configuration to update
 * @param path input path to append (may be relative)
 */
public static void addInputPath( Configuration conf, Path path )
  {
  Path resolved = new Path( getWorkingDirectory( conf ), path );
  String escaped = StringUtils.escapeString( resolved.toString() );
  String current = conf.get( "mapred.input.dir" );

  if( current == null )
    conf.set( "mapred.input.dir", escaped );
  else
    conf.set( "mapred.input.dir", current + StringUtils.COMMA_STR + escaped );
  }
/**
 * Adds a key/value pair to the summary. The value is escaped with the
 * configured special characters, and newline / carriage-return characters
 * are encoded as the literal two-character sequences "\n" / "\r".
 *
 * @param key summary key
 * @param value value; converted with String.valueOf (null becomes "null")
 * @return this builder, for chaining
 */
<T> SummaryBuilder add(String key, T value) {
  String escaped = StringUtils.escapeString(
      String.valueOf(value), StringUtils.ESCAPE_CHAR, charsToEscape);
  // String.replace does a literal replacement; replaceAll compiled a regex
  // and needed double-escaped replacement strings for the same effect.
  escaped = escaped.replace("\n", "\\n").replace("\r", "\\r");
  return _add(key, escaped);
}
/**
 * Adds a key/value pair to the summary. The value is escaped with the
 * configured special characters, and newline / carriage-return characters
 * are encoded as the literal two-character sequences "\n" / "\r".
 *
 * @param key summary key
 * @param value value; converted with String.valueOf (null becomes "null")
 * @return this builder, for chaining
 */
<T> SummaryBuilder add(String key, T value) {
  String escaped = StringUtils.escapeString(
      String.valueOf(value), StringUtils.ESCAPE_CHAR, charsToEscape);
  // Literal replacement via String.replace avoids the regex machinery (and
  // the double-escaping) that replaceAll required for the same result.
  escaped = escaped.replace("\n", "\\n").replace("\r", "\\r");
  return _add(key, escaped);
}
/**
 * Adds a key/value pair to the summary. The value is escaped with the
 * configured special characters, and newline / carriage-return characters
 * are encoded as the literal two-character sequences "\n" / "\r".
 *
 * @param key summary key
 * @param value value; converted with String.valueOf (null becomes "null")
 * @return this builder, for chaining
 */
<T> SummaryBuilder add(String key, T value) {
  String escaped = StringUtils.escapeString(
      String.valueOf(value), StringUtils.ESCAPE_CHAR, charsToEscape);
  // Replace literal newline/CR characters without compiling a regex; the
  // old replaceAll form needed "\\\\n" to produce the same backslash-n.
  escaped = escaped.replace("\n", "\\n").replace("\r", "\\r");
  return _add(key, escaped);
}