@Override
public void insert(Object record) throws IOException {
  updater.insert(writeId, record);
}
@Override
public void write(long writeId, byte[] record) throws StreamingIOFailure, SerializationError {
  try {
    Object encodedRow = encode(record);
    int bucket = getBucket(encodedRow);
    getRecordUpdater(bucket).insert(writeId, encodedRow);
  } catch (IOException e) {
    throw new StreamingIOFailure("Error writing record in transaction write id (" + writeId + ")", e);
  }
}
@Test
public void testInsertDelegates() throws IOException {
  mutator.insert(RECORD);
  verify(mockRecordUpdater).insert(WRITE_ID, RECORD);
}
@Override
public void write(long writeId, byte[] record) throws SerializationError, StreamingIOFailure {
  try {
    byte[] orderedFields = reorderFields(record);
    Object encodedRow = encode(orderedFields);
    int bucket = getBucket(encodedRow);
    getRecordUpdater(bucket).insert(writeId, encodedRow);
  } catch (IOException e) {
    throw new StreamingIOFailure("Error writing record in transaction write id (" + writeId + ")", e);
  }
}
private void write(long writeId, Record record) throws StreamingException {
  checkAutoFlush();
  try {
    Object encodedRow = encode(record);
    int bucket = getBucket(encodedRow);
    List<String> partitionValues = getPartitionValues(encodedRow);
    getRecordUpdater(partitionValues, bucket).insert(writeId, encodedRow);
    conn.getConnectionStats().incrementRecordsWritten();
  } catch (IOException e) {
    throw new StreamingIOFailure("Error writing record in transaction write id (" + writeId + ")", e);
  }
}
    .tableProperties(tblProps);
RecordUpdater updater = new OrcRecordUpdater(root, options);
updater.insert(11, new MyRow("first"));
updater.insert(11, new MyRow("second"));
updater.insert(11, new MyRow("third"));
updater.flush();
updater.insert(12, new MyRow("fourth"));
updater.insert(12, new MyRow("fifth"));
updater.flush();
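// A minimal sketch, for context, of the AcidOutputFormat.Options builder
// chain whose tail (".tableProperties(tblProps)") appears above. The
// concrete values here are illustrative assumptions, not taken from the
// surrounding test.
AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf)
    .inspector(inspector)      // ObjectInspector describing MyRow
    .bucket(0)                 // bucket id encoded into the delta file name
    .minimumWriteId(11)
    .maximumWriteId(12)
    .finalDestination(root)
    .tableProperties(tblProps);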
@Override
public void write(final long writeId, final byte[] record) throws StreamingException {
  checkAutoFlush();
  ingestSizeBytes += record.length;
  try {
    Object encodedRow = encode(record);
    int bucket = getBucket(encodedRow);
    List<String> partitionValues = getPartitionValues(encodedRow);
    getRecordUpdater(partitionValues, bucket).insert(writeId, encodedRow);
    // ingest size bytes gets reset on flush(), whereas connection stats do not
    conn.getConnectionStats().incrementRecordsWritten();
    conn.getConnectionStats().incrementRecordsSize(record.length);
  } catch (IOException e) {
    throw new StreamingIOFailure("Error writing record in transaction write id (" + writeId + ")", e);
  }
}
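// A minimal usage sketch of the hive-streaming client API that drives the
// write(writeId, record) path above. Assumptions: a transactional table
// "db.tbl" exists and the field delimiter matches its schema; names like
// "example-agent" are placeholders.
StrictDelimitedInputWriter writer = StrictDelimitedInputWriter.newBuilder()
    .withFieldDelimiter(',')
    .build();
HiveStreamingConnection connection = HiveStreamingConnection.newBuilder()
    .withDatabase("db")
    .withTable("tbl")
    .withAgentInfo("example-agent")
    .withRecordWriter(writer)
    .withHiveConf(conf)
    .connect();
connection.beginTransaction();
connection.write("1,hello".getBytes()); // routed to write(writeId, record)
connection.commitTransaction();
connection.close();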
  rowOutWriters[findWriterOffset(row)].write(recordValue);
} else if (conf.getWriteType() == AcidUtils.Operation.INSERT) {
  fpaths.updaters[findWriterOffset(row)].insert(conf.getTableWriteId(), row);
} else {
String[][] values = {new String[]{"a", "b", "c", "d", "e"},
    new String[]{"f", "g", "h", "i", "j"}};
for (int i = 0; i < values[0].length; ++i) {
  ru.insert(1, new MyRow(values[0][i]));
}
ru = of.getRecordUpdater(root, options);
for (int i = 0; i < values[1].length; ++i) {
  ru.insert(2, new MyRow(values[1][i]));
}
ru.insert(options.getMaximumWriteId(), new MyRow("first"));
ru.close(false);
String[] values = new String[]{"1", "2", "3", "4", "5"};
for (int i = 0; i < values.length; ++i) {
  ru.insert(0, new MyRow(values[i]));
}
values = new String[]{"6", "7", "8"};
for (int i = 0; i < values.length; ++i) {
  ru.insert(1, new MyRow(values[i]));
}
values = new String[]{"9", "10"};
for (int i = 0; i < values.length; ++i) {
  ru.insert(3, new MyRow(values[i]));
}
RecordUpdater ru = of.getRecordUpdater(mockPath, options);
for (int i = 0; i < 10; ++i) {
  ru.insert(options.getMinimumWriteId(), new MyRow(i, 2 * i));
}
updater.insert(options.getMinimumWriteId(), new DummyRow(1, 0, options.getMinimumWriteId(), bucket));
updater.insert(options.getMinimumWriteId(), new DummyRow(2, 1, options.getMinimumWriteId(), bucket));
updater.insert(options.getMinimumWriteId(), new DummyRow(3, 2, options.getMinimumWriteId(), bucket));
updater.close(false);
    .maximumWriteId(2);
updater = new OrcRecordUpdater(root, options);
updater.insert(options.getMinimumWriteId(), new DummyRow(4, 0, options.getMinimumWriteId(), bucket));
updater.insert(options.getMinimumWriteId(), new DummyRow(5, 1, options.getMinimumWriteId(), bucket));
updater.insert(options.getMinimumWriteId(), new DummyRow(6, 2, options.getMinimumWriteId(), bucket));
updater.close(false);
    .maximumWriteId(3);
updater = new OrcRecordUpdater(root, options);
updater.insert(options.getMinimumWriteId(), new DummyRow(7, 0, options.getMinimumWriteId(), bucket));
updater.insert(options.getMinimumWriteId(), new DummyRow(8, 1, options.getMinimumWriteId(), bucket));
updater.insert(options.getMinimumWriteId(), new DummyRow(9, 2, options.getMinimumWriteId(), bucket));
updater.close(false);
"2.0", "2.1", "3.0", "ignore.4", "ignore.5", "ignore.6"}; for(int i=0; i < values.length; ++i) { ru.insert(0, new BigRow(i, i, values[i], i, i));
    .finalDestination(root);
RecordUpdater updater = new OrcRecordUpdater(root, options);
updater.insert(11, new MyRow("first"));
updater.insert(11, new MyRow("second"));
updater.insert(11, new MyRow("third"));
updater.flush();
updater.insert(12, new MyRow("fourth"));
updater.insert(12, new MyRow("fifth"));
updater.flush();
updater.insert(20, new MyRow("sixth"));
updater.close(false);
reader = OrcFile.createReader(bucketPath,
for (long j = 0; j < NUM_ROWID_PER_OWID; ++j) {
  long payload = (i - 1) * NUM_ROWID_PER_OWID + j;
  updater.insert(i, new DummyRow(payload, j, i, bucket));
}