StringUtils.byteToHexString

How to use the byteToHexString method in org.apache.hadoop.util.StringUtils

Best Java code snippets using org.apache.hadoop.util.StringUtils.byteToHexString (Showing top 20 results out of 315)
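Before the individual snippets, here is a minimal, self-contained sketch of the three overloads in action. The exact output format (lowercase hex digits with no separators) is an assumption about Hadoop's formatting, not something this page documents:

import org.apache.hadoop.util.StringUtils;

public class ByteToHexDemo {
  public static void main(String[] args) {
    byte[] bytes = {0x0a, (byte) 0xff, 0x10};

    // Whole array: expected "0aff10" (assuming lowercase, separator-free output).
    System.out.println(StringUtils.byteToHexString(bytes));

    // Range overload: start is inclusive, end is exclusive -> expected "ff".
    System.out.println(StringUtils.byteToHexString(bytes, 1, 2));

    // Single-byte overload: expected "0a".
    System.out.println(StringUtils.byteToHexString((byte) 0x0a));
  }
}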

origin: apache/hive

/**
 * Given an array of bytes, converts the range [start, end) to its
 * hex string representation.
 * @param bytes input bytes
 * @param start start index, inclusive
 * @param end end index, exclusive
 * @return hex string representation of the byte range
 */
public static String byteToHexString(byte[] bytes, int start, int end) {
 return org.apache.hadoop.util.StringUtils.byteToHexString(bytes, start, end);
}
origin: org.apache.hadoop/hadoop-common

/**
 * Convert a byte to a hex string.
 * @see #byteToHexString(byte[])
 * @see #byteToHexString(byte[], int, int)
 * @param b byte
 * @return byte's hex value as a String
 */
public static String byteToHexString(byte b) {
 return byteToHexString(new byte[] {b});
}
origin: org.apache.hadoop/hadoop-common

/** Same as byteToHexString(bytes, 0, bytes.length). */
public static String byteToHexString(byte[] bytes) {
 return byteToHexString(bytes, 0, bytes.length);
}
origin: org.apache.hadoop/hadoop-common

@Override
public String toString() {
 return "elector id=" + System.identityHashCode(this) +
  " appData=" +
  ((appData == null) ? "null" : StringUtils.byteToHexString(appData)) + 
  " cb=" + appClient;
}
origin: org.apache.hadoop/hadoop-common

/** Check the rpc response header. */
void checkResponse(RpcResponseHeaderProto header) throws IOException {
 if (header == null) {
  throw new EOFException("Response is null.");
 }
 if (header.hasClientId()) {
  // check client IDs
  final byte[] id = header.getClientId().toByteArray();
  if (!Arrays.equals(id, RpcConstants.DUMMY_CLIENT_ID)) {
   if (!Arrays.equals(id, clientId)) {
    throw new IOException("Client IDs not matched: local ID="
      + StringUtils.byteToHexString(clientId) + ", ID in response="
      + StringUtils.byteToHexString(header.getClientId().toByteArray()));
   }
  }
 }
}
origin: org.apache.hadoop/hadoop-common

/**
 * Try to delete the "ActiveBreadCrumb" node when gracefully giving up
 * active status.
 * If this fails, it will simply warn, since the graceful release behavior
 * is only an optimization.
 */
private void tryDeleteOwnBreadCrumbNode() {
 assert state == State.ACTIVE;
 LOG.info("Deleting bread-crumb of active node...");
 
 // Sanity check the data. This shouldn't be strictly necessary,
 // but better to play it safe.
 Stat stat = new Stat();
 byte[] data = null;
 try {
  data = zkClient.getData(zkBreadCrumbPath, false, stat);
  if (!Arrays.equals(data, appData)) {
   throw new IllegalStateException(
     "We thought we were active, but in fact " +
     "the active znode had the wrong data: " +
     StringUtils.byteToHexString(data) + " (stat=" + stat + ")");
  }
  
  deleteWithRetries(zkBreadCrumbPath, stat.getVersion());
 } catch (Exception e) {
  LOG.warn("Unable to delete our own bread-crumb of being active at {}." +
    ". Expecting to be fenced by the next active.", zkBreadCrumbPath, e);
 }
}
origin: org.apache.hadoop/hadoop-common

// Excerpt: non-contiguous matches from the same UTF-8 decoding routine.
if (i >= nBytes) {
 throw new UTFDataFormatException("Truncated UTF8 at " +
   StringUtils.byteToHexString(bytes, i - 1, 1));
}
// ...
//   StringUtils.byteToHexString(bytes, i - 1, 2));
if (i + 2 >= nBytes) {
 throw new UTFDataFormatException("Truncated UTF8 at " +
   StringUtils.byteToHexString(bytes, i - 1, 3));
}
// ...
//   StringUtils.byteToHexString(bytes, i - 1, endForError));
origin: org.apache.hadoop/hadoop-common

LOG.info("Old node exists: {}", StringUtils.byteToHexString(data));
if (Arrays.equals(data, appData)) {
 LOG.info("But old node has our own data, so don't need to fence it.");
origin: org.apache.hadoop/hadoop-common

 @Override
 protected void processPath(PathData item) throws IOException {
  if (item.stat.isDirectory()) {
   throw new PathIsDirectoryException(item.toString());
  }
  FileChecksum checksum = item.fs.getFileChecksum(item.path);
  if (checksum == null) {
   out.printf("%s\tNONE\t%n", item.toString());
  } else {
   String checksumString = StringUtils.byteToHexString(
     checksum.getBytes(), 0, checksum.getLength());
   out.printf("%s\t%s\t%s%n",
     item.toString(), checksum.getAlgorithmName(),
     checksumString);
  }
 }
}
origin: apache/hive

private static void addInsertNonDirectoryInformation(Path p, FileSystem fileSystem,
  InsertEventRequestData insertData) throws IOException {
 insertData.addToFilesAdded(p.toString());
 FileChecksum cksum = fileSystem.getFileChecksum(p);
 String acidDirPath = AcidUtils.getFirstLevelAcidDirPath(p.getParent(), fileSystem);
 // File checksum is not implemented for local filesystem (RawLocalFileSystem)
 if (cksum != null) {
  String checksumString =
    StringUtils.byteToHexString(cksum.getBytes(), 0, cksum.getLength());
  insertData.addToFilesAddedChecksum(checksumString);
 } else {
  // Add an empty checksum string for filesystems that don't generate one
  insertData.addToFilesAddedChecksum("");
 }
 // acid dir will be present only for acid write operations.
 if (acidDirPath != null) {
  insertData.addToSubDirectoryList(acidDirPath);
 }
}
origin: org.apache.hadoop/hadoop-common

} catch (ChecksumException ce) {
  LOG.info("Found checksum error: b[" + off + ", " + (off + read) + "]="
    + StringUtils.byteToHexString(b, off, off + read), ce);
  if (retriesLeft == 0) {
   throw ce;
  }
  // ...
}
origin: apache/drill

String checksumString =
  StringUtils.byteToHexString(cksum.getBytes(), 0, cksum.getLength());
insertData.addToFilesAddedChecksum(checksumString);
// ... (else branch omitted; same pattern as the apache/hive snippet above)
origin: KylinOLAP/Kylin

  @Override
  protected void cleanup(Context context) throws IOException, InterruptedException {
    int nRegion = Math.round((float) gbPoints.size() / (float) cut);
    nRegion = Math.max(1, nRegion);
    nRegion = Math.min(MAX_REGION, nRegion);
    
    int gbPerRegion = gbPoints.size() / nRegion;
    gbPerRegion = Math.max(1, gbPerRegion);
    
    System.out.println(nRegion + " regions");
    System.out.println(gbPerRegion + " GB per region");
    
    for (int i = gbPerRegion; i < gbPoints.size(); i += gbPerRegion) {
      Text key = gbPoints.get(i);
      outputValue.set(i);
      System.out.println(StringUtils.byteToHexString(key.getBytes()) + "\t" + outputValue.get());
      context.write(key, outputValue);
    }
  }
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Save the ".md5" file that lists the md5sum of another file.
 * @param dataFile the original file whose md5 was computed
 * @param digest the computed digest
 * @throws IOException
 */
public static void saveMD5File(File dataFile, MD5Hash digest)
  throws IOException {
 final String digestString = StringUtils.byteToHexString(digest.getDigest());
 saveMD5File(dataFile, digestString);
}
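For context, a hedged sketch of how a digest becomes the hex payload written by saveMD5File. That MD5Hash.digest(byte[]) computes an MD5 over a byte array is an assumption about the hadoop-common helper:

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.util.StringUtils;

public class Md5HexSketch {
  public static void main(String[] args) {
    byte[] content = "hello".getBytes(StandardCharsets.UTF_8);
    // Assumption: MD5Hash.digest(byte[]) computes the MD5 of the given bytes.
    MD5Hash digest = MD5Hash.digest(content);
    // The same encoding step saveMD5File applies before writing the ".md5" file.
    String hex = StringUtils.byteToHexString(digest.getDigest());
    System.out.println(hex); // 32 hex characters
  }
}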
origin: org.apache.hadoop/hadoop-hdfs

/** Convert a MD5MD5CRC32FileChecksum to a Json string. */
public static String toJsonString(final MD5MD5CRC32FileChecksum checksum) {
 if (checksum == null) {
  return null;
 }
 final Map<String, Object> m = new TreeMap<String, Object>();
 m.put("algorithm", checksum.getAlgorithmName());
 m.put("length", checksum.getLength());
 m.put("bytes", StringUtils.byteToHexString(checksum.getBytes()));
 return toJsonString(FileChecksum.class, m);
}
origin: KylinOLAP/Kylin

@SuppressWarnings("deprecation")
public byte[][] getSplits(Configuration conf, Path path) throws Exception {
  FileSystem fs = path.getFileSystem(conf);
  if (!fs.exists(path)) {
    System.err.println("Path " + path + " not found, no region split, HTable will be one region");
    return null;
  }
  List<byte[]> rowkeyList = new ArrayList<byte[]>();
  SequenceFile.Reader reader = null;
  try {
    reader = new SequenceFile.Reader(fs, path, conf);
    Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
    Writable value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
    while (reader.next(key, value)) {
      rowkeyList.add(((Text) key).copyBytes());
    }
  } catch (Exception e) {
    e.printStackTrace();
    throw e;
  } finally {
    IOUtils.closeStream(reader);
  }
  logger.info((rowkeyList.size() + 1) + " regions");
  logger.info(rowkeyList.size() + " splits");
  for (byte[] split : rowkeyList) {
    System.out.println(StringUtils.byteToHexString(split));
  }
  byte[][] retValue = rowkeyList.toArray(new byte[rowkeyList.size()][]);
  return retValue.length == 0 ? null : retValue;
}
origin: org.apache.hadoop/hadoop-hdfs

@Override
protected HAServiceTarget dataToTarget(byte[] data) {
 ActiveNodeInfo proto;
 try {
  proto = ActiveNodeInfo.parseFrom(data);
 } catch (InvalidProtocolBufferException e) {
  throw new RuntimeException("Invalid data in ZK: " +
    StringUtils.byteToHexString(data));
 }
 NNHAServiceTarget ret = new NNHAServiceTarget(
   conf, proto.getNameserviceId(), proto.getNamenodeId());
 InetSocketAddress addressFromProtobuf = new InetSocketAddress(
   proto.getHostname(), proto.getPort());
 
 if (!addressFromProtobuf.equals(ret.getAddress())) {
  throw new RuntimeException("Mismatched address stored in ZK for " +
    ret + ": Stored protobuf was " + proto + ", address from our own " +
    "configuration for this NameNode was " + ret.getAddress());
 }
 
 ret.setZkfcPort(proto.getZkfcPort());
 return ret;
}
origin: org.apache.hadoop/hadoop-hdfs

try {
 if (LOG.isTraceEnabled()) {
  LOG.trace("data:" + StringUtils.byteToHexString(data));
 }
 // ...
origin: org.apache.hadoop/hadoop-hdfs

public short getPreferredBlockReplication() {
 short max = getFileReplication(CURRENT_STATE_ID);
 FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
 if (sf != null) {
  short maxInSnapshot = sf.getMaxBlockRepInDiffs(null);
  if (sf.isCurrentFileDeleted()) {
   return maxInSnapshot;
  }
  max = maxInSnapshot > max ? maxInSnapshot : max;
 }
 if(!isStriped()){
  return max;
 }
 ErasureCodingPolicy ecPolicy = ErasureCodingPolicyManager.getInstance()
   .getByID(getErasureCodingPolicyID());
 Preconditions.checkNotNull(ecPolicy, "Could not find EC policy with ID 0x"
   + StringUtils.byteToHexString(getErasureCodingPolicyID()));
 return (short) (ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits());
}
origin: ch.cern.hadoop/hadoop-hdfs

static private void checkData(byte[] actual, int from, byte[] expected, int len,
  String message) {
 for (int idx = 0; idx < len; idx++) {
  if (expected[from + idx] != actual[idx]) {
   Assert.fail(message + " byte " + (from + idx) + " differs. expected "
     + expected[from + idx] + " actual " + actual[idx] +
     "\nexpected: " + StringUtils.byteToHexString(expected, from, from + len) +
     "\nactual:   " + StringUtils.byteToHexString(actual, 0, len));
  }
 }
}

org.apache.hadoop.util.StringUtils.byteToHexString

Javadoc

Convert a byte to a hex string.
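
A round-trip sketch may help here. It assumes StringUtils also provides hexStringToByte(String) as the inverse decoder; treat that name as an assumption rather than something this page confirms:

import java.util.Arrays;
import org.apache.hadoop.util.StringUtils;

public class HexRoundTrip {
  public static void main(String[] args) {
    byte[] original = {0x00, 0x7f, (byte) 0x80, (byte) 0xff};
    String hex = StringUtils.byteToHexString(original);
    // Assumption: hexStringToByte(String) decodes the hex string back to bytes.
    byte[] decoded = StringUtils.hexStringToByte(hex);
    System.out.println(hex + " round-trips: " + Arrays.equals(original, decoded));
  }
}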

Popular methods of StringUtils

  • stringifyException
    Make a string representation of the exception.
  • join
    Concatenates strings, using a separator.
  • split
  • arrayToString
  • toLowerCase
    Converts all of the characters in this String to lower case with Locale.ENGLISH.
  • escapeString
  • startupShutdownMessage
    Print a log message for starting up and shutting down.
  • getStrings
    Returns an arraylist of strings.
  • toUpperCase
    Converts all of the characters in this String to upper case with Locale.ENGLISH.
  • formatTime
    Given the time in long milliseconds, returns a String in the format Xhrs, Ymins, Z sec.
  • unEscapeString
  • getStringCollection
    Returns a collection of strings.
  • byteDesc
  • formatPercent
  • getTrimmedStrings
  • equalsIgnoreCase
  • format
  • formatTimeDiff
  • getTrimmedStringCollection
