@Override public int compare( Object baseObj1, long baseOff1, int baseLen1, Object baseObj2, long baseOff2, int baseLen2) { // Note that since ordering doesn't need the total length of the record, we just pass 0 // into the row. row1.pointTo(baseObj1, baseOff1 + 4, 0); row2.pointTo(baseObj2, baseOff2 + 4, 0); return ordering.compare(row1, row2); } }
/** * Resets the `startingOffset` according to the current cursor of row buffer, and clear out null * bits. This should be called before we write a new nested struct to the row buffer. */ public void reset() { this.startingOffset = holder.cursor; // grow the global buffer to make sure it has enough space to write fixed-length data. holder.grow(fixedSize); holder.cursor += fixedSize; zeroOutNullBytes(); }
public void setNull4Bytes(int ordinal) { setNullBit(ordinal); // put zero into the corresponding field when set null writeInt(getElementOffset(ordinal), 0); }
public Example6(Object[] references) {
  this.references = references;
  this.result = new UnsafeRow(1);
  // The holder must be created before the row writers that share it.
  this.holder =
      new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(this.result, 32);
  this.rowWriter =
      new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(this.holder, 1);
  this.arrayWriter =
      new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeArrayWriter();
  this.arrayWriter1 =
      new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeArrayWriter();
  this.rowWriter1 =
      new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(this.holder, 2);
}
/** Builds a two-column UnsafeRow containing the given long values. */
private UnsafeRow makeValueRow(long v1, long v2) {
  final UnsafeRowWriter writer = new UnsafeRowWriter(2);
  writer.reset();
  final long[] values = {v1, v2};
  for (int ordinal = 0; ordinal < values.length; ordinal++) {
    writer.write(ordinal, values[ordinal]);
  }
  return writer.getRow();
}
/** * Resets the `startingOffset` according to the current cursor of row buffer, and clear out null * bits. This should be called before we write a new nested struct to the row buffer. */ public void resetRowWriter() { this.startingOffset = cursor(); // grow the global buffer to make sure it has enough space to write fixed-length data. grow(fixedSize); increaseCursor(fixedSize); zeroOutNullBytes(); }
public void setNull8Bytes(int ordinal) { setNullBit(ordinal); // put zero into the corresponding field when set null writeLong(getElementOffset(ordinal), 0); }
public void setNull1Bytes(int ordinal) { setNullBit(ordinal); // put zero into the corresponding field when set null writeByte(getElementOffset(ordinal), (byte)0); }
public void write(int ordinal, int value) {
  final long fieldOffset = getFieldOffset(ordinal);
  // Zero the whole 8-byte slot first so its unused portion carries no stale
  // bytes, then store the 4-byte value at the slot's start.
  writeLong(fieldOffset, 0L);
  writeInt(fieldOffset, value);
}
// Generate a row ordering from the key schema; the supplier produces
// KVComparator instances that compare records using that ordering.
BaseOrdering ordering = GenerateOrdering.create(keySchema);
Supplier<RecordComparator> comparatorSupplier =
    () -> new KVComparator(ordering, keySchema.length());
public Example4(Object[] references) {
  this.references = references;
  this.result = new UnsafeRow(1);
  // The holder must be created before the row writer that shares it.
  this.holder =
      new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(this.result, 32);
  this.rowWriter =
      new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(this.holder, 1);
  this.arrayWriter =
      new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeArrayWriter();
  this.arrayWriter1 =
      new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeArrayWriter();
}
public void setNull4Bytes(int ordinal) { setNullBit(ordinal); // put zero into the corresponding field when set null writeInt(getElementOffset(ordinal), 0); }
public void setNull8Bytes(int ordinal) { setNullBit(ordinal); // put zero into the corresponding field when set null writeLong(getElementOffset(ordinal), 0); }
@Override public int compare( Object baseObj1, long baseOff1, int baseLen1, Object baseObj2, long baseOff2, int baseLen2) { // Note that since ordering doesn't need the total length of the record, we just pass -1 // into the row. row1.pointTo(baseObj1, baseOff1 + 4, -1); row2.pointTo(baseObj2, baseOff2 + 4, -1); return ordering.compare(row1, row2); } }
// Generate a row ordering from the key schema; the supplier produces
// KVComparator instances that compare records using that ordering.
BaseOrdering ordering = GenerateOrdering.create(keySchema);
Supplier<RecordComparator> comparatorSupplier =
    () -> new KVComparator(ordering, keySchema.length());
public Example5(Object[] references) {
  this.references = references;
  this.result = new UnsafeRow(1);
  // The holder must be created before the row writers that share it.
  this.holder =
      new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(this.result, 32);
  this.rowWriter =
      new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(this.holder, 1);
  this.arrayWriter =
      new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeArrayWriter();
  this.rowWriter1 =
      new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(this.holder, 2);
}
@Override public int compare( Object baseObj1, long baseOff1, int baseLen1, Object baseObj2, long baseOff2, int baseLen2) { // Note that since ordering doesn't need the total length of the record, we just pass 0 // into the row. row1.pointTo(baseObj1, baseOff1 + 4, 0); row2.pointTo(baseObj2, baseOff2 + 4, 0); return ordering.compare(row1, row2); } }
// Generate a row ordering from the key schema and build the record
// comparator directly from it.
BaseOrdering ordering = GenerateOrdering.create(keySchema);
KVComparator recordComparator = new KVComparator(ordering, keySchema.length());
// NOTE(review): the boolean expression below is truncated in this chunk — it
// requires a single key column and continues with further conditions not
// visible here; confirm against the full file.
boolean canUseRadixSort = keySchema.length() == 1 &&
public Example7(Object[] references) {
  this.references = references;
  this.result = new UnsafeRow(1);
  // The holder must be created before the row writer that shares it.
  this.holder =
      new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(this.result, 32);
  this.rowWriter =
      new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(this.holder, 1);
  this.arrayWriter =
      new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeArrayWriter();
  this.arrayWriter1 =
      new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeArrayWriter();
}