Codota Logo
StringUtils.escapeString
Code IndexAdd Codota to your IDE (free)

How to use
escapeString
method
in
org.apache.hadoop.util.StringUtils

Best Java code snippets using org.apache.hadoop.util.StringUtils.escapeString (Showing top 20 results out of 378)

  • Add the Codota plugin to your IDE and get smart completions
private void myMethod () {
BufferedReader b =
  • Codota IconInputStream in;new BufferedReader(new InputStreamReader(in))
  • Codota IconReader in;new BufferedReader(in)
  • Codota IconFile file;new BufferedReader(new FileReader(file))
  • Smart code suggestions by Codota
}
origin: org.apache.hadoop/hadoop-common

/**
 * Escapes every comma in the given string using the default escape
 * character, delegating to the general-purpose overload.
 *
 * @param str the string to escape
 * @return the escaped string
 */
public static String escapeString(String str) {
 // Default pairing: ESCAPE_CHAR prefixes each COMMA occurrence.
 final char escape = ESCAPE_CHAR;
 final char target = COMMA;
 return escapeString(str, escape, target);
}

origin: org.apache.hadoop/hadoop-common

/**
 * Escapes each occurrence of {@code charToEscape} in {@code str} by
 * prefixing it with {@code escapeChar}.
 *
 * @param str          the string to process
 * @param escapeChar   the character used as the escape prefix
 * @param charToEscape the single character that must be escaped
 * @return the escaped string
 */
public static String escapeString(
  String str, char escapeChar, char charToEscape) {
 // Wrap the single target in an array and reuse the multi-char overload.
 char[] targets = {charToEscape};
 return escapeString(str, escapeChar, targets);
}

origin: apache/hive

/**
 * Joins the given directories into one comma-separated input-path string,
 * escaping each path so that embedded commas survive a later split.
 *
 * @param dirs the input directories; may be null or empty
 * @return the escaped, comma-joined path list, or "" when there is nothing to join
 */
private String makeInputString(List<Path> dirs) {
 if (dirs == null || dirs.isEmpty()) return "";
 // StringBuilder instead of StringBuffer: this accumulator is method-local,
 // so the StringBuffer synchronization was pure overhead.
 StringBuilder str = new StringBuilder(StringUtils.escapeString(dirs.get(0).toString()));
 for(int i = 1; i < dirs.size(); i++) {
  str.append(",").append(StringUtils.escapeString(dirs.get(i).toString()));
 }
 return str.toString();
}
private ValidWriteIdList extractValidWriteIdList() {
origin: OryxProject/oryx

/**
 * @return paths from {@link FileStatus}es into one comma-separated String
 * @see FileInputFormat#addInputPath(org.apache.hadoop.mapreduce.Job, Path)
 */
private static String joinFSPaths(FileSystem fs, FileStatus[] statuses) {
 StringBuilder joined = new StringBuilder();
 String separator = "";
 for (FileStatus status : statuses) {
  // Qualify the path against the file system before escaping it.
  Path qualified = fs.makeQualified(status.getPath());
  joined.append(separator).append(StringUtils.escapeString(qualified.toString()));
  separator = ",";
 }
 return joined.toString();
}
origin: apache/hive

/**
 * Take an array of strings and encode it into one string.
 * Null elements are encoded as empty strings.
 *
 * @param plain the strings to encode; may be null
 * @return the encoded string, or null when {@code plain} is null
 */
public static String encodeArray(String[] plain) {
 if (plain == null)
  return null;
 String[] escaped = new String[plain.length];
 for (int i = 0; i < plain.length; ++i) {
  // Substitute "" via a local instead of writing back into the caller's
  // array: the original mutated its argument as a hidden side effect.
  String element = plain[i] == null ? "" : plain[i];
  escaped[i] = StringUtils.escapeString(element);
 }
 return StringUtils.arrayToString(escaped);
}
origin: apache/hive

 .append(StringUtils.escapeString(qualifiedPath));
separator = StringUtils.COMMA_STR;
origin: apache/hive

String escapedQuery = StringUtils.escapeString(query, ESCAPE_CHAR, escapedChars);
String sql = String.format(SPLIT_QUERY, escapedQuery, numSplits);
try {
origin: apache/hive

for(String whProp : webhcatHiveProps) {
 hiveProps.append(hiveProps.length() > 0 ? "," : "").append(StringUtils.escapeString(whProp));
origin: apache/hive

job.set("mapred.input.dir", StringUtils.escapeString(currPath.toString()));
origin: apache/drill

/**
 * Advances through the remaining input paths and returns the splits for the
 * first path that yields at least one split.
 *
 * @return the wrapped splits for the next non-empty path, or null when no
 *         paths remain
 * @throws Exception if the input format cannot be instantiated or split
 *         computation fails
 */
protected FetchInputFormatSplit[] getNextSplits() throws Exception {
 while (getNextPath()) {
  // not using FileInputFormat.setInputPaths() here because it forces a connection to the
  // default file system - which may or may not be online during pure metadata operations
  job.set("mapred.input.dir", StringUtils.escapeString(currPath.toString()));
  // Fetch operator is not vectorized and as such turn vectorization flag off so that
  // non-vectorized record reader is created below.
  HiveConf.setBoolVar(job, HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED, false);
  Class<? extends InputFormat> formatter = currDesc.getInputFileFormatClass();
  Utilities.copyTableJobPropertiesToConf(currDesc.getTableDesc(), job);
  InputFormat inputFormat = getInputFormatFromCache(formatter, job);
  InputSplit[] splits = inputFormat.getSplits(job, 1);
  // Pair each raw split with the input format that produced it.
  FetchInputFormatSplit[] inputSplits = new FetchInputFormatSplit[splits.length];
  for (int i = 0; i < splits.length; i++) {
   inputSplits[i] = new FetchInputFormatSplit(splits[i], inputFormat);
  }
  if (work.getSplitSample() != null) {
   inputSplits = splitSampling(work.getSplitSample(), inputSplits);
  }
  // A path may contribute zero splits (e.g. after sampling); keep scanning.
  if (inputSplits.length > 0) {
   return inputSplits;
  }
 }
 return null;
}
origin: apache/hive

  org.apache.hadoop.util.StringUtils.escapeString(parentDir.getAbsolutePath()));
inputSplits = inputFormat.getSplits(localJc, 1);
actualSplitNum = inputSplits.length;
origin: apache/drill

  org.apache.hadoop.util.StringUtils.escapeString(parentDir.getAbsolutePath()));
inputSplits = inputFormat.getSplits(localJc, 1);
actualSplitNum = inputSplits.length;
origin: apache/accumulo

/**
 * Appends {@code name=value} to {@code fields}, escaping ',' and '=' in both
 * the key and the value with a backslash so the pair can later be joined and
 * split unambiguously.
 *
 * @param fields the destination list, appended to in place
 * @param name   the field name
 * @param value  the field value; rendered with String.valueOf (null -> "null")
 */
private void addField(List<String> fields, String name, Object value) {
 // Allocate the escape set once and share it between both calls
 // (the original built an identical array twice per invocation).
 char[] charsToEscape = {',', '='};
 String key = StringUtils.escapeString(name, '\\', charsToEscape);
 String val = StringUtils.escapeString(String.valueOf(value), '\\', charsToEscape);
 fields.add(key + '=' + val);
}
origin: ch.cern.hadoop/hadoop-common

/**
 * Escapes all commas in {@code str} with the default escape character.
 *
 * @param str a string
 * @return an escaped string
 */
public static String escapeString(String str) {
 // Forward to the three-argument overload with the default pair.
 return escapeString(str, ESCAPE_CHAR, COMMA);
}

origin: org.apache.accumulo/accumulo-core

/**
 * Appends {@code name=value} to {@code fields}, escaping ',' and '=' in both
 * the key and the value with a backslash so the pair can later be joined and
 * split unambiguously.
 *
 * @param fields the destination list, appended to in place
 * @param name   the field name
 * @param value  the field value; rendered with String.valueOf (null -> "null")
 */
private void addField(List<String> fields, String name, Object value) {
 // Allocate the escape set once and share it between both calls
 // (the original built an identical array twice per invocation).
 char[] charsToEscape = {',', '='};
 String key = StringUtils.escapeString(name, '\\', charsToEscape);
 String val = StringUtils.escapeString(String.valueOf(value), '\\', charsToEscape);
 fields.add(key + '=' + val);
}
origin: ml.shifu/guagua-yarn

/**
 * Qualifies {@code path} against its file system and appends it, escaped, to
 * the comma-separated GUAGUA_YARN_INPUT_DIR list in {@code conf}.
 *
 * @param conf the configuration to update
 * @param path the input path to add
 * @throws IOException if the path's file system cannot be resolved
 */
public static void addInputPath(Configuration conf, Path path) throws IOException {
    Path qualified = path.getFileSystem(conf).makeQualified(path);
    String escaped = org.apache.hadoop.util.StringUtils.escapeString(qualified.toString());
    String existing = conf.get(GuaguaYarnConstants.GUAGUA_YARN_INPUT_DIR);
    // First path stands alone; subsequent paths are comma-appended.
    String updated = existing == null ? escaped : existing + "," + escaped;
    conf.set(GuaguaYarnConstants.GUAGUA_YARN_INPUT_DIR, updated);
}
origin: cwensel/cascading

/**
 * Resolves {@code path} against the job's working directory and appends the
 * escaped result to the comma-separated "mapred.input.dir" list in {@code conf}.
 *
 * @param conf the configuration to update
 * @param path the input path to add; relative paths resolve against the working directory
 */
public static void addInputPath( Configuration conf, Path path )
 {
 Path resolved = new Path( getWorkingDirectory( conf ), path );
 String escaped = StringUtils.escapeString( resolved.toString() );
 String current = conf.get( "mapred.input.dir" );

 // First entry stands alone; later entries are comma-separated.
 if( current == null )
  conf.set( "mapred.input.dir", escaped );
 else
  conf.set( "mapred.input.dir", current + StringUtils.COMMA_STR + escaped );
 }
origin: org.apache.hadoop/hadoop-mapreduce-client-app

/**
 * Adds a key/value pair to the summary, escaping separator characters and
 * rewriting real newlines as the two-character sequences "\n" / "\r" so each
 * record stays on a single line.
 *
 * @param key   the field name
 * @param value the field value; rendered with String.valueOf (null -> "null")
 * @return this builder, for chaining
 */
<T> SummaryBuilder add(String key, T value) {
 // String.replace does a literal substitution - equivalent to the original
 // replaceAll but without compiling a regex for single-character targets.
 String escapedString = StringUtils.escapeString(String.valueOf(value), 
   StringUtils.ESCAPE_CHAR, charsToEscape)
   .replace("\n", "\\n")
   .replace("\r", "\\r");
 return _add(key, escapedString);
}
origin: org.apache.hadoop/hadoop-yarn-server-resourcemanager

/**
 * Adds a key/value pair to the summary, escaping separator characters and
 * rewriting real newlines as the two-character sequences "\n" / "\r" so each
 * record stays on a single line.
 *
 * @param key   the field name
 * @param value the field value; rendered with String.valueOf (null -> "null")
 * @return this builder, for chaining
 */
<T> SummaryBuilder add(String key, T value) {
 // String.replace does a literal substitution - equivalent to the original
 // replaceAll but without compiling a regex for single-character targets.
 String escapedString = StringUtils.escapeString(String.valueOf(value),
   StringUtils.ESCAPE_CHAR, charsToEscape)
   .replace("\n", "\\n")
   .replace("\r", "\\r");
 return _add(key, escapedString);
}
origin: com.github.jiayuhan-it/hadoop-mapreduce-client-app

/**
 * Adds a key/value pair to the summary, escaping separator characters and
 * rewriting real newlines as the two-character sequences "\n" / "\r" so each
 * record stays on a single line.
 *
 * @param key   the field name
 * @param value the field value; rendered with String.valueOf (null -> "null")
 * @return this builder, for chaining
 */
<T> SummaryBuilder add(String key, T value) {
 // String.replace does a literal substitution - equivalent to the original
 // replaceAll but without compiling a regex for single-character targets.
 String escapedString = StringUtils.escapeString(String.valueOf(value), 
   StringUtils.ESCAPE_CHAR, charsToEscape)
   .replace("\n", "\\n")
   .replace("\r", "\\r");
 return _add(key, escapedString);
}
org.apache.hadoop.util.StringUtils.escapeString

Javadoc

Escape commas in the string using the default escape char

Popular methods of StringUtils

  • stringifyException
    Make a string representation of the exception.
  • join
    Concatenates strings, using a separator.
  • split
  • arrayToString
  • toLowerCase
    Converts all of the characters in this String to lower case with Locale.ENGLISH.
  • startupShutdownMessage
    Print a log message for starting up and shutting down
  • getStrings
    Returns an arraylist of strings.
  • toUpperCase
    Converts all of the characters in this String to upper case with Locale.ENGLISH.
  • byteToHexString
    Given an array of bytes it will convert the bytes to a hex string representation of the bytes
  • formatTime
    Given the time in long milliseconds, returns a String in the format Xhrs, Ymins, Z sec.
  • unEscapeString
  • getStringCollection
    Returns a collection of strings.
  • unEscapeString,
  • getStringCollection,
  • byteDesc,
  • formatPercent,
  • getTrimmedStrings,
  • equalsIgnoreCase,
  • format,
  • formatTimeDiff,
  • getTrimmedStringCollection

Popular in Java

  • Start an intent from android
  • onRequestPermissionsResult (Fragment)
  • getExternalFilesDir (Context)
  • onCreateOptionsMenu (Activity)
  • System (java.lang)
    Provides access to system-related information and resources including standard input and output. Ena
  • Date (java.sql)
    A class which can consume and produce dates in SQL Date format. Dates are represented in SQL as yyyy
  • Enumeration (java.util)
    A legacy iteration interface.New code should use Iterator instead. Iterator replaces the enumeration
  • ResourceBundle (java.util)
    Resource bundles contain locale-specific objects. When your program needs a locale-specific resource
  • Loader (org.hibernate.loader)
    Abstract superclass of object loading (and querying) strategies. This class implements useful common
  • Scheduler (org.quartz)
    This is the main interface of a Quartz Scheduler. A Scheduler maintains a registery of org.quartz
Codota Logo
  • Products

    Search for Java codeSearch for JavaScript codeEnterprise
  • IDE Plugins

    IntelliJ IDEAWebStormAndroid StudioEclipseVisual Studio CodePyCharmSublime TextPhpStormVimAtomGoLandRubyMineEmacsJupyter
  • Company

    About UsContact UsCareers
  • Resources

    FAQBlogCodota Academy Plugin user guide Terms of usePrivacy policyJava Code IndexJavascript Code Index
Get Codota for your IDE now