/**
 * Converts a {@link RecordIdentifier} into the writer id encoded in its bucket property, so
 * that the RecordIdentifier can be used in place of the bucketing column.
 *
 * @param i RecordIdentifier to convert; may be {@code null}
 * @return the decoded bucket (writer) id, or {@code null} when {@code i} is {@code null}
 */
public IntWritable evaluate(RecordIdentifier i) {
  if (i == null) {
    return null;
  }
  final int bucketProperty = i.getBucketProperty();
  // The bucket property is versioned; pick the codec matching its version before decoding.
  intWritable.set(BucketCodec.determineVersion(bucketProperty).decodeWriterId(bucketProperty));
  return intWritable;
}
/**
 * Extracts the {@link RecordIdentifier} from {@code record} and checks that the bucket id
 * encoded in its bucket property agrees with the bucket id computed from the record itself.
 * The check is not applied to deletes.
 *
 * @param operationType the mutation being performed
 * @param newPartitionValues partition of the record, used only for the error message
 * @param record the record being mutated
 * @return the identifier extracted from {@code record}
 * @throws BucketIdException if the encoded and computed bucket ids disagree for a non-delete
 */
private RecordIdentifier extractRecordIdentifier(OperationType operationType,
    List<String> newPartitionValues, Object record) throws BucketIdException {
  RecordIdentifier recordIdentifier = recordInspector.extractRecordIdentifier(record);
  int bucketProperty = recordIdentifier.getBucketProperty();
  int encodedBucketId = BucketCodec.determineVersion(bucketProperty).decodeWriterId(bucketProperty);
  int computedBucketId = bucketIdResolver.computeBucketId(record);
  if (encodedBucketId != computedBucketId && operationType != OperationType.DELETE) {
    throw new BucketIdException("RecordIdentifier.bucketId != computed bucketId ("
        + computedBucketId + ") for record " + recordIdentifier + " in partition "
        + newPartitionValues + ".");
  }
  return recordIdentifier;
}
/**
 * Compares this key against a {@link RecordIdentifier} by copying its fields into the reusable
 * {@code otherKey} and delegating to the key-based comparison. A {@code null} identifier sorts
 * after this key (returns -1).
 *
 * @param other identifier to compare against; may be {@code null}
 * @return a negative, zero or positive value per the {@code compareTo} contract
 */
private int compareTo(RecordIdentifier other) {
  if (other != null) {
    otherKey.set(other.getWriteId(), other.getBucketProperty(), other.getRowId());
    return compareTo(otherKey);
  }
  return -1;
}
@Override
/**
 * Copies the write id, bucket property and row id from {@code ri} into {@code struct}, placing
 * each value at its {@link Field} ordinal position.
 *
 * @param ri source identifier; when {@code null}, every slot of {@code struct} is set to null
 * @param struct destination array; must be of size Field.values().length
 */
public static void toArray(RecordIdentifier ri, Object[] struct) {
  assert struct != null && struct.length == Field.values().length;
  if (ri != null) {
    struct[Field.writeId.ordinal()] = ri.getWriteId();
    struct[Field.bucketId.ordinal()] = ri.getBucketProperty();
    struct[Field.rowId.ordinal()] = ri.getRowId();
  } else {
    Arrays.fill(struct, null);
  }
}
}
/**
 * Re-targets the active mutator at the partition and bucket of the incoming record before the
 * operation is applied: creates the partition if needed (inserts only), swaps the mutator when
 * the partition or bucket changed, and otherwise validates that record ids arrive in order.
 *
 * @param operationType the mutation being performed
 * @param newPartitionValues partition of the incoming record; may be null (treated as empty)
 * @param record the record being mutated (used for error reporting)
 * @throws WorkerException if resetting the mutator fails
 */
private void reconfigureState(OperationType operationType, List<String> newPartitionValues, Object record)
    throws WorkerException {
  RecordIdentifier newRecordIdentifier = extractRecordIdentifier(operationType, newPartitionValues, record);
  // NOTE(review): despite the name, this is the raw encoded bucket *property*, not the decoded
  // writer id (contrast with extractRecordIdentifier, which decodes via BucketCodec). Confirm
  // that resetMutator/bucketIdHasChanged expect the encoded form.
  int newBucketId = newRecordIdentifier.getBucketProperty();

  // Normalize: an unpartitioned table is represented as an empty partition-value list.
  if (newPartitionValues == null) {
    newPartitionValues = Collections.emptyList();
  }

  try {
    if (partitionHasChanged(newPartitionValues)) {
      // Partitions are only auto-created for inserts; updates/deletes must target existing ones.
      if (table.createPartitions() && operationType == OperationType.INSERT) {
        partitionHelper.createPartitionIfNotExists(newPartitionValues);
      }
      Path newPartitionPath = partitionHelper.getPathForPartition(newPartitionValues);
      resetMutator(newBucketId, newPartitionValues, newPartitionPath);
    } else if (bucketIdHasChanged(newBucketId)) {
      // Same partition, different bucket: reuse the current partition path.
      resetMutator(newBucketId, partitionValues, partitionPath);
    } else {
      // Same partition and bucket: enforce monotonically ordered record identifiers.
      validateRecordSequence(operationType, newRecordIdentifier);
    }
  } catch (IOException e) {
    throw new WorkerException("Failed to reset mutator when performing " + operationType
        + " of record: " + record, e);
  }
}
// Copy the reader's current RecordIdentifier field-by-field into a fresh instance
// (presumably because the reader reuses its identifier object across next() calls — confirm)
// and pair it with the row's string value.
RecordIdentifier recordIdentifier = recordReader.getRecordIdentifier();
Record record = new Record(new RecordIdentifier(recordIdentifier.getWriteId(),
    recordIdentifier.getBucketProperty(), recordIdentifier.getRowId()), value.toString());
// NOTE(review): System.out output — looks like test/diagnostic code; verify before shipping.
System.out.println(record);
records.add(record);
// Constrain the delete-event reader's search argument to the key interval:
// arguments are (interval, options, min bucket property, max bucket property, min row id, max row id).
setSARG(keyIntervalTmp, deleteEventReaderOptions, minKey.getBucketProperty(),
    maxKey.getBucketProperty(), minKey.getRowId(), maxKey.getRowId());
// NOTE(review): this chunk appears garbled — the while loop below is duplicated verbatim and
// both copies end mid-statement (the second argument of the final assertEquals is missing).
// Verify against the original test file before editing; code kept byte-identical here.
while (reader.next(id, struct)) {
  assertEquals("id " + record, record, id.getRowId());
  assertEquals("bucket " + record, 0, id.getBucketProperty());
  assertEquals("writeid " + record, 1, id.getWriteId());
  assertEquals("a " + record,
while (reader.next(id, struct)) {
  assertEquals("id " + record, record, id.getRowId());
  assertEquals("bucket " + record, 0, id.getBucketProperty());
  assertEquals("writeid " + record, 1, id.getWriteId());
  assertEquals("a " + record,
// Third event: expect bucket property 20, row id 40, value "third".
assertEquals(20, id.getBucketProperty());
assertEquals(40, id.getRowId());
assertEquals("third", getValue(event));
// Fourth event: expect bucket property 50, row id 60, value "fourth".
assertEquals(50, id.getBucketProperty());
assertEquals(60, id.getRowId());
assertEquals("fourth", getValue(event));