@Override
public Status update(String table, String key, Map<String, ByteIterator> values) {
  // Build an UPDATE keyed on the row's primary key; only columns present in
  // `values` are written — absent columns are left untouched on the server.
  Update op = this.kuduTable.newUpdate();
  PartialRow updateRow = op.getRow();
  updateRow.addString(KEY, key);
  // Column 0 is the key column; walk the remaining schema columns.
  for (int col = 1; col < schema.getColumnCount(); col++) {
    String columnName = schema.getColumnByIndex(col).getName();
    ByteIterator value = values.get(columnName);
    if (value == null) {
      continue; // caller did not supply this column — skip it
    }
    updateRow.addStringUtf8(columnName, value.toArray());
  }
  apply(op);
  return Status.OK;
}
// NOTE(review): fragment — the enclosing method signature, try/catch, and control
// flow around these statements are outside this view; the throw and the code after
// it presumably belong to different branches (catch vs. happy path) — TODO confirm.
// Wraps the table-open failure with the table name for context, preserving the cause.
throw new KunderaException("Cannot open table : " + entityMetadata.getTableName(), e);
// Picks UPDATE vs INSERT semantics based on the caller's flag, then fills the
// operation's row from the entity via the metadata-driven populator.
Operation operation = isUpdate ? table.newUpdate() : table.newInsert();
PartialRow row = operation.getRow();
populatePartialRow(row, entityMetadata, entity);
/**
 * Handles the UPDATE path: creates a fresh update operation for this table and
 * hands it to the shared processing routine along with the execution context.
 */
protected void processForUpdate(KuduExecutionContext kuduExecutionContext) {
  final Update updateOp = kuduTable.newUpdate();
  performCommonProcessing(updateOp, kuduExecutionContext);
}
/**
 * Creates a new Kudu {@link Operation} for the given write mode.
 *
 * @param table the Kudu table the operation targets
 * @param writeMode requested write semantics (INSERT, UPDATE or UPSERT)
 * @return a fresh operation; UPSERT also serves as the fallback for any
 *     write mode not explicitly handled here
 */
public static Operation toOperation(KuduTable table, KuduConnector.WriteMode writeMode) {
  switch (writeMode) {
    case INSERT:
      return table.newInsert();
    case UPDATE:
      return table.newUpdate();
    case UPSERT:
    default:
      // Fold the trailing "return table.newUpsert()" fallback into the switch:
      // the original duplicated the UPSERT branch after the switch, which reads
      // as dead code and would silently diverge if one copy were ever edited.
      return table.newUpsert();
  }
}
/** Builds an UPDATE operation addressing the row whose key column (index 0) is {@code key}. */
private Update createUpdate(int key) {
  final Update op = table.newUpdate();
  op.getRow().addInt(0, key);
  return op;
}
/**
 * Applies an UPDATE that addresses a row by its composite key and overwrites
 * only the counter column with {@code newCount}.
 *
 * @throws IOException if applying the operation to the session fails
 */
private void updateRow(long keyOne, long keyTwo, int newCount) throws IOException {
  final Update op = table.newUpdate();
  final PartialRow target = op.getRow();
  target.addLong(COLUMN_KEY_ONE, keyOne);
  target.addLong(COLUMN_KEY_TWO, keyTwo);
  target.addInt(COLUMN_UPDATE_COUNT, newCount);
  session.apply(op);
}
break;
// NOTE(review): fragment of a larger switch over a write mode — the preceding
// case, the surrounding switch header, and the UPSERT body are outside this view.
case UPDATE:
  operation = table.newUpdate();
  break;
case UPSERT:
// NOTE(review): method is cut off in this view — the row population, any use of
// `output`, and the loop/method closing braces are not visible.
// Writes each encoded record in `data` to the table, as an UPDATE when the
// `update` flag is set and as an INSERT otherwise.
private void persist(Context output, byte[][] data, boolean update) throws KuduException {
  for (int i = 0; i < data.length; i++) {
    Operation put = update ? table.newUpdate() : table.newInsert();
    PartialRow row = put.getRow();
@Test(timeout = 100000) public void testBatchWithSameRow() throws Exception { KuduTable table = client.createTable(tableName, basicSchema, getBasicCreateTableOptions()); KuduSession session = client.newSession(); session.setFlushMode(SessionConfiguration.FlushMode.MANUAL_FLUSH); // Insert 25 rows, one per batch, along with 50 updates for each, and a delete at the end, // while also clearing the cache between each batch half the time. The delete is added here // so that a misplaced update would fail if it happens later than its delete. for (int i = 0; i < 25; i++) { session.apply(createInsert(table, i)); for (int j = 0; j < 50; j++) { Update update = table.newUpdate(); PartialRow row = update.getRow(); row.addInt(basicSchema.getColumnByIndex(0).getName(), i); row.addInt(basicSchema.getColumnByIndex(1).getName(), 1000); session.apply(update); } Delete del = table.newDelete(); PartialRow row = del.getRow(); row.addInt(basicSchema.getColumnByIndex(0).getName(), i); session.apply(del); session.flush(); if (i % 2 == 0) { asyncClient.emptyTabletsCacheForTable(table.getTableId()); } } assertEquals(0, countRowsInScan(client.newScannerBuilder(table).build())); }