public LazyTuple(Schema schema, byte[][] textBytes, long offset,
                 byte[] nullBytes, SerializerDeserializer serde) {
  this.schema = schema;
  this.textBytes = textBytes;
  this.values = new Datum[schema.size()];
  this.offset = offset;
  this.nullBytes = nullBytes;
  this.serializeDeserialize = serde;
}
public PhysicalExec(final TaskAttemptContext context, final Schema inSchema,
                    final Schema outSchema) {
  this.context = context;
  this.inSchema = inSchema;
  this.outSchema = outSchema;
  this.outColumnNum = outSchema.size();
}
@Override
public void enableStats() {
  if (inited) {
    throw new IllegalStateException("Should enable this option before init()");
  }
  this.tableStatsEnabled = true;
  this.columnStatsEnabled = new boolean[schema.size()];
}
public RCFileAppender(Configuration conf, final Schema schema, final TableMeta meta,
                      final Path path) throws IOException {
  super(conf, schema, meta, path);
  RECORD_INTERVAL = conf.getInt(RECORD_INTERVAL_CONF_STR, RECORD_INTERVAL);
  COLUMNS_BUFFER_SIZE = conf.getInt(COLUMNS_BUFFER_SIZE_CONF_STR, COLUMNS_BUFFER_SIZE);
  columnNumber = schema.size();
}
/**
 * Build an array of path filters for all levels with an all-accepting filter condition.
 *
 * @param partitionColumns The partition columns schema
 * @return The array of path filters, accepting all partition paths.
 */
public static PathFilter[] buildAllAcceptingPathFilters(Schema partitionColumns) {
  PathFilter[] filters = new PathFilter[partitionColumns.size()];
  for (int i = 0; i < partitionColumns.size(); i++) { // one all-accepting filter per partition level
    filters[i] = new PartitionPathFilter(partitionColumns, null);
  }
  return filters;
}
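A sketch of how such per-level filters can be applied while descending a partition directory tree. FileSystem.listStatus(Path, PathFilter) is the standard Hadoop call; the traversal loop itself is an illustrative assumption, not Tajo's actual scan code.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;

public class PartitionWalk {
  // Descend one directory level per partition column, applying the filter
  // for that level. filters[i] guards depth i below the table directory.
  static List<Path> listPartitionPaths(FileSystem fs, Path tableDir, PathFilter[] filters)
      throws IOException {
    List<Path> current = new ArrayList<>();
    current.add(tableDir);
    for (PathFilter filter : filters) {
      List<Path> next = new ArrayList<>();
      for (Path dir : current) {
        for (FileStatus status : fs.listStatus(dir, filter)) {
          next.add(status.getPath());
        }
      }
      current = next;
    }
    return current; // leaf partition directories
  }
}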
public NullScanner(Configuration conf, Schema schema, TableMeta meta, Fragment fragment) {
  this.conf = conf;
  this.meta = meta;
  this.schema = schema;
  this.fragment = fragment;
  this.tableStats = new TableStats();
  this.columnNum = this.schema.size();
}
public FileScanner(Configuration conf, final Schema schema, final TableMeta meta,
                   final FileFragment fragment) {
  this.conf = conf;
  this.meta = meta;
  this.schema = schema;
  this.fragment = fragment;
  this.tableStats = new TableStats();
  this.columnNum = this.schema.size();
}
public TableStatistics(Schema schema, boolean[] columnStatsEnabled) {
  this.schema = schema;
  minValues = new VTuple(schema.size());
  maxValues = new VTuple(schema.size());
  numNulls = new long[schema.size()];
  this.columnStatsEnabled = columnStatsEnabled;

  // Column-level statistics are never collected for PROTOBUF-typed columns.
  for (int i = 0; i < schema.size(); i++) {
    if (schema.getColumn(i).getDataType().getType().equals(Type.PROTOBUF)) {
      columnStatsEnabled[i] = false;
    }
  }
}
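The addTuple snippet further down shows how these statistics are fed. A minimal sketch of that feeding loop in isolation, assuming `stats`, `schema`, and `tuples` are already set up; the method names match the snippets on this page.

// Feed each tuple's fields to TableStatistics, mirroring the pattern
// used in addTuple() below.
for (Tuple tuple : tuples) {
  for (int i = 0; i < schema.size(); i++) {
    stats.analyzeField(i, tuple);   // updates per-column min/max/null counts
  }
  stats.incrementRow();             // bump the row counter once per tuple
}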
public OutputHandler(TextLineDeserializer deserializer) {
  this.deserializer = deserializer;
  FIELD_DELIM = new String(CSVLineSerDe.getFieldDelimiter(deserializer.meta));
  tuple = new VTuple(deserializer.schema.size());
}
private RowStoreEncoder(Schema schema) {
  this.schema = schema;
  nullFlags = new BitArray(schema.size());
  headerSize = nullFlags.bytesLength();
}
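Since the constructor is private, callers obtain an encoder through a factory. A hedged usage sketch: toBytes(Tuple) is visible in the snippet above, while the RowStoreUtil.createEncoder(Schema) factory name is an assumption.

// Hedged usage sketch; the factory name is assumed, not confirmed here.
// The encoder prefixes each row with a BitArray null-flag header whose
// size is derived from schema.size().
RowStoreEncoder encoder = RowStoreUtil.createEncoder(schema);
byte[] encodedRow = encoder.toBytes(tuple);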
public static int[] getTargetIds(Schema inSchema, Schema outSchema) {
  int[] targetIds = new int[outSchema.size()];
  int i = 0;
  for (Column target : outSchema.getColumns()) {
    targetIds[i] = inSchema.getColumnId(target.getQualifiedName());
    i++;
  }
  return targetIds;
}
public RowFileScanner(Configuration conf, final Schema schema, final TableMeta meta,
                      final Fragment fragment) throws IOException {
  super(conf, schema, meta, fragment);
  SYNC_INTERVAL = conf.getInt(ConfVars.ROWFILE_SYNC_INTERVAL.varname,
      ConfVars.ROWFILE_SYNC_INTERVAL.defaultIntVal) * SYNC_SIZE;
  nullFlags = new BitArray(schema.size());
  tupleHeaderSize = nullFlags.bytesLength() + (2 * Short.SIZE / 8);
  this.start = this.fragment.getStartKey();
  this.end = this.start + this.fragment.getLength();
}
@Override
public void addTuple(Tuple tuple) throws IOException {
  if (tableStatsEnabled) {
    for (int i = 0; i < schema.size(); ++i) {
      stats.analyzeField(i, tuple);
    }
  }
  writer.addTuple(tuple);
  if (tableStatsEnabled) {
    stats.incrementRow();
  }
}
public static int[] getTargetIds(Schema inSchema, Schema outSchema) {
  int[] targetIds = new int[outSchema.size()];
  int i = 0;
  for (Column target : outSchema.getRootColumns()) {
    targetIds[i] = inSchema.getColumnId(target.getQualifiedName());
    i++;
  }
  return targetIds;
}
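A sketch of how such an index mapping is typically consumed to project an input tuple onto the output schema. Only getTargetIds comes from the snippets above; the projection loop and the Tuple accessor names are illustrative assumptions.

// Illustrative projection loop (an assumption, not Tajo's actual code):
// copy each mapped input field into the output tuple, position by position.
int[] targetIds = getTargetIds(inSchema, outSchema);
Tuple out = new VTuple(outSchema.size());
for (int i = 0; i < targetIds.length; i++) {
  out.put(i, in.get(targetIds[i])); // accessor names assumed
}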
@Override
public void init() {
  nullChars = TextLineSerDe.getNullCharsAsBytes(meta);
  delimiter = CSVLineSerDe.getFieldDelimiter(meta);
  columnNum = schema.size();
  serde = new TextFieldSerializerDeserializer(meta);
  serde.init(schema);
}
public HashAggregateExec(TaskAttemptContext ctx, GroupbyNode plan, PhysicalExec subOp)
    throws IOException {
  super(ctx, plan, subOp);
  hashKeyProjector = new KeyProjector(inSchema, plan.getGroupingColumns());
  hashTable = new TupleMap<>(ctx.getQueryContext().getInt(SessionVars.AGG_HASH_TABLE_SIZE));
  this.tuple = new VTuple(plan.getOutSchema().size());
}