/**
 * Print record to string builder
 * @param value
 * @param buf
 * @throws IOException
 */
private void printRecord(BytesRefArrayWritable value, StringBuilder buf) throws IOException {
  int n = value.size();
  if (n > 0) {
    BytesRefWritable v = value.unCheckedGet(0);
    ByteBuffer bb = ByteBuffer.wrap(v.getData(), v.getStart(), v.getLength());
    buf.append(decoder.decode(bb));
    for (int i = 1; i < n; i++) {
      // do not put the TAB for the last column
      buf.append(RCFileCat.TAB);
      v = value.unCheckedGet(i);
      bb = ByteBuffer.wrap(v.getData(), v.getStart(), v.getLength());
      buf.append(decoder.decode(bb));
    }
    buf.append(RCFileCat.NEWLINE);
  }
}
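// A minimal driver sketch, not part of the original snippets: one way printRecord could be
// fed rows read from an RCFile via org.apache.hadoop.hive.ql.io.RCFile.Reader. The method
// name catFile, the file path, and the UTF-8 CharsetDecoder field named "decoder" are
// assumptions made for illustration only.
private final CharsetDecoder decoder = StandardCharsets.UTF_8.newDecoder();

private void catFile(Configuration conf, Path path) throws IOException {
  RCFile.Reader reader = new RCFile.Reader(FileSystem.get(conf), path, conf);
  try {
    LongWritable rowId = new LongWritable();
    BytesRefArrayWritable cols = new BytesRefArrayWritable();
    StringBuilder buf = new StringBuilder();
    while (reader.next(rowId)) {
      reader.getCurrentRow(cols); // cols holds one BytesRefWritable slice per column
      printRecord(cols, buf);     // append the row as TAB-separated, NEWLINE-terminated text
    }
    System.out.print(buf);
  } finally {
    reader.close();
  }
}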
int byteLength = getLength(objectInspector, cachedByteArrayRef,
    rawBytesField.getStart(), rawBytesField.getLength());
if (byteLength == -1) {
  // a length of -1 marks a null field
  return null;
}
field.init(cachedByteArrayRef, rawBytesField.getStart(), byteLength);
return field.getObject();
for (int row = 0; row < result.size(); row++) {
  brw = result.get(row);
  int start = brw.getStart();
  int len = brw.getLength();
  byte[] actualData = Arrays.copyOfRange(brw.getData(), start, start + len);
}
private Writable updateColumnValue(RCFileColumn column, BytesRefWritable bytesRef) throws IOException {
  if (bytesRef.getLength() == 0) {
    // This is a null field.
    return NullWritable.get();
  }
  Writable newColumnValue = column.newWritable();
  // Small optimization to bypass DataInput read if the column writable is
  // BytesRefWritable
  if (newColumnValue.getClass() == BytesRefWritable.class) {
    newColumnValue = bytesRef;
  } else {
    byte[] currentRowBytes = Arrays.copyOfRange(bytesRef.getData(),
        bytesRef.getStart(), bytesRef.getStart() + bytesRef.getLength());
    DataInput dataInput = ByteStreams.newDataInput(currentRowBytes);
    newColumnValue.readFields(dataInput);
  }
  return newColumnValue;
}
private void parseLongColumn(int column) {
  // don't include column number in message because it causes boxing which is expensive here
  checkArgument(!isPartitionColumn[column], "Column is a partition key");

  loaded[column] = true;

  if (hiveColumnIndexes[column] >= value.size()) {
    // this partition may contain fewer fields than what's declared in the schema
    // this happens when additional columns are added to the hive table after a partition has been created
    nulls[column] = true;
  } else {
    BytesRefWritable fieldData = value.unCheckedGet(hiveColumnIndexes[column]);
    byte[] bytes;
    try {
      bytes = fieldData.getData();
    } catch (IOException e) {
      throw Throwables.propagate(e);
    }
    int start = fieldData.getStart();
    int length = fieldData.getLength();
    parseLongColumn(column, bytes, start, length);
  }
}
private void parseDoubleColumn(int column) {
  // don't include column number in message because it causes boxing which is expensive here
  checkArgument(!isPartitionColumn[column], "Column is a partition key");

  loaded[column] = true;

  if (hiveColumnIndexes[column] >= value.size()) {
    // this partition may contain fewer fields than what's declared in the schema
    // this happens when additional columns are added to the hive table after a partition has been created
    nulls[column] = true;
  } else {
    BytesRefWritable fieldData = value.unCheckedGet(hiveColumnIndexes[column]);
    byte[] bytes;
    try {
      bytes = fieldData.getData();
    } catch (IOException e) {
      throw Throwables.propagate(e);
    }
    int start = fieldData.getStart();
    int length = fieldData.getLength();
    parseDoubleColumn(column, bytes, start, length);
  }
}
private void parseObjectColumn(int column) {
  // don't include column number in message because it causes boxing which is expensive here
  checkArgument(!isPartitionColumn[column], "Column is a partition key");

  loaded[column] = true;

  if (hiveColumnIndexes[column] >= value.size()) {
    // this partition may contain fewer fields than what's declared in the schema
    // this happens when additional columns are added to the hive table after a partition has been created
    nulls[column] = true;
  } else {
    BytesRefWritable fieldData = value.unCheckedGet(hiveColumnIndexes[column]);
    byte[] bytes;
    try {
      bytes = fieldData.getData();
    } catch (IOException e) {
      throw Throwables.propagate(e);
    }
    int start = fieldData.getStart();
    int length = fieldData.getLength();
    parseObjectColumn(column, bytes, start, length);
  }
}
private void parseBooleanColumn(int column) {
  // don't include column number in message because it causes boxing which is expensive here
  checkArgument(!isPartitionColumn[column], "Column is a partition key");

  loaded[column] = true;

  if (hiveColumnIndexes[column] >= value.size()) {
    // this partition may contain fewer fields than what's declared in the schema
    // this happens when additional columns are added to the hive table after a partition has been created
    nulls[column] = true;
  } else {
    BytesRefWritable fieldData = value.unCheckedGet(hiveColumnIndexes[column]);
    byte[] bytes;
    try {
      bytes = fieldData.getData();
    } catch (IOException e) {
      throw Throwables.propagate(e);
    }
    int start = fieldData.getStart();
    int length = fieldData.getLength();
    parseBooleanColumn(column, bytes, start, length);
  }
}
private void parseStringColumn(int column) {
  // don't include column number in message because it causes boxing which is expensive here
  checkArgument(!isPartitionColumn[column], "Column is a partition key");

  loaded[column] = true;

  if (hiveColumnIndexes[column] >= value.size()) {
    // this partition may contain fewer fields than what's declared in the schema
    // this happens when additional columns are added to the hive table after a partition has been created
    nulls[column] = true;
  } else {
    BytesRefWritable fieldData = value.unCheckedGet(hiveColumnIndexes[column]);
    byte[] bytes;
    try {
      bytes = fieldData.getData();
    } catch (IOException e) {
      throw Throwables.propagate(e);
    }
    int start = fieldData.getStart();
    int length = fieldData.getLength();
    parseStringColumn(column, bytes, start, length);
  }
}
@Override
public HiveReadableRecord parse(Writable value, HiveReadableRecord record) throws IOException {
  final BytesRefArrayWritable braw = (BytesRefArrayWritable) value;
  final ArrayRecord arrayRecord = (ArrayRecord) record;
  arrayRecord.reset();
  for (int i = 0; i < columnIndexes.length; i++) {
    final int column = columnIndexes[i];
    final BytesRefWritable fieldData = braw.unCheckedGet(column);
    final byte[] bytes = fieldData.getData();
    final int start = fieldData.getStart();
    final int length = fieldData.getLength();
    if (length == "\\N".length() && bytes[start] == '\\' && bytes[start + 1] == 'N') {
      arrayRecord.setNull(column, true);
    } else {
      parsePrimitiveColumn(column, bytes, start, length);
    }
  }
  return arrayRecord;
}