/**
 * Writes a single key/value cell to the HBase table described by {@code configMap}.
 *
 * <p>The column family and qualifier are read from the config map under
 * {@link HTableFactory#FAMILY_TAG} and {@link HTableFactory#QUALIFIER_TAG}.
 *
 * @param configMap HBase connection/table configuration; validated by
 *                  {@code HTableFactory.checkConfig} before use
 * @param key       row key for the Put
 * @param value     cell value to store
 * @return a human-readable confirmation string "Put key:value"
 * @throws RuntimeException wrapping any failure from the HBase client
 */
public String evaluate(Map<String, String> configMap, String key, String value) {
    HTableFactory.checkConfig(configMap);
    try {
        HTable table = HTableFactory.getHTable(configMap);
        // NOTE(review): String.getBytes() uses the platform default charset;
        // presumably all callers run with the same default — confirm, or switch
        // to an explicit UTF-8 encoding for portability.
        Put thePut = new Put(key.getBytes());
        thePut.add(configMap.get(HTableFactory.FAMILY_TAG).getBytes(),
                   configMap.get(HTableFactory.QUALIFIER_TAG).getBytes(),
                   value.getBytes());
        table.put(thePut);
        // Explicit flush so the write is pushed to the server before we return.
        table.flushCommits();
        return "Put " + key + ":" + value;
    } catch (Exception exc) {
        // Log the exception itself (not just a message) so the stack trace
        // is preserved in the log as well as in the rethrown wrapper.
        LOG.error("Error while doing HBase Puts", exc);
        throw new RuntimeException(exc);
    }
}
/**
 * Writes a batch of key/value cells to the HBase table described by
 * {@code configMap}, one Put per entry in {@code keyValueMap}.
 *
 * <p>The column family and qualifier are read from the config map under
 * {@link HTableFactory#FAMILY_TAG} and {@link HTableFactory#QUALIFIER_TAG}.
 *
 * @param configMap   HBase connection/table configuration; validated by
 *                    {@code HTableFactory.checkConfig} before use
 * @param keyValueMap row-key to cell-value pairs to store
 * @return a human-readable confirmation string containing the map's toString
 * @throws RuntimeException wrapping any failure from the HBase client
 */
public String evaluate(Map<String, String> configMap, Map<String, String> keyValueMap) {
    HTableFactory.checkConfig(configMap);
    try {
        // Presize to avoid intermediate array growth for large batches.
        List<Put> putList = new ArrayList<Put>(keyValueMap.size());
        for (Map.Entry<String, String> keyValue : keyValueMap.entrySet()) {
            // NOTE(review): String.getBytes() uses the platform default charset;
            // confirm this matches how the data is read back.
            Put thePut = new Put(keyValue.getKey().getBytes());
            thePut.add(configMap.get(HTableFactory.FAMILY_TAG).getBytes(),
                       configMap.get(HTableFactory.QUALIFIER_TAG).getBytes(),
                       keyValue.getValue().getBytes());
            putList.add(thePut);
        }
        HTable table = HTableFactory.getHTable(configMap);
        table.put(putList);
        // Explicit flush so the batch is pushed to the server before we return.
        table.flushCommits();
        return "Put " + keyValueMap.toString();
    } catch (Exception exc) {
        // Log the exception itself (not just a message) so the stack trace
        // is preserved in the log as well as in the rethrown wrapper.
        LOG.error("Error while doing HBase Puts", exc);
        throw new RuntimeException(exc);
    }
}
/**
 * Sends the buffered Puts in {@code kvBuff} to HBase as one batch, optionally
 * flushing the client-side write buffer, then clears the buffer.
 *
 * @param kvBuff       buffer whose {@code putList} is written and then cleared
 * @param flushCommits when true, {@code flushCommits()} is invoked after the put
 * @throws HiveException wrapping any {@link IOException} from the HBase client
 */
protected void batchUpdate(PutBuffer kvBuff, boolean flushCommits) throws HiveException {
    try {
        HTable htable = HTableFactory.getHTable(configMap);
        htable.put(kvBuff.putList);
        if (flushCommits) {
            htable.flushCommits();
        }
        int batchSize = kvBuff.putList.size();
        numPutRecords += batchSize;
        if (batchSize > 0) {
            // Log the first and last row keys of the batch for traceability.
            String firstRow = new String(kvBuff.putList.get(0).getRow());
            String lastRow = new String(kvBuff.putList.get(batchSize - 1).getRow());
            LOG.info(" Doing Batch Put " + batchSize + " records; Total put records = "
                    + numPutRecords + " ; Start = " + firstRow + " ; End = " + lastRow);
        } else {
            LOG.info(" Doing Batch Put with ZERO 0 records");
        }
        kvBuff.putList.clear();
    } catch (IOException e) {
        throw new HiveException(e);
    }
}
// NOTE(review): flushCommits() is called twice back-to-back; the second call
// flushes an already-empty write buffer — presumably a leftover duplicate,
// confirm against the enclosing test's intent before removing.
primary.flushCommits();
primary.flushCommits();
// Disable auto-flush so both puts are buffered client-side, then push them
// to the server together with an explicit flush.
primary.setAutoFlush(false);
primary.put(Arrays.asList(p1, p2));
primary.flushCommits();
/**
 * Verifies that deleting a row removes its covered-index entries for current
 * scans while the entry remains visible at the original put timestamp.
 */
@Test
public void testSimpleDeletes() throws Exception {
    HTable primary = createSetupTables(fam1);
    // do a simple Put
    long ts = 10;
    Put p = new Put(row1);
    p.add(FAM, indexed_qualifer, ts, value1);
    p.add(FAM, regular_qualifer, ts, value2);
    primary.put(p);
    primary.flushCommits();
    // Delete the whole row at the current time.
    Delete d = new Delete(row1);
    primary.delete(d);
    HTable index = new HTable(UTIL.getConfiguration(), fam1.getTable());
    List<KeyValue> expected = Collections.<KeyValue> emptyList();
    // scan over all time should cause the delete to be covered
    IndexTestingUtils.verifyIndexTableAtTimestamp(index, expected, 0, Long.MAX_VALUE, value1,
        HConstants.EMPTY_END_ROW);
    // scan at the older timestamp should still show the older value
    List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
    pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
    pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
    expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts, pairs);
    IndexTestingUtils.verifyIndexTableAtTimestamp(index, expected, ts, value1);
    // cleanup
    closeAndCleanupTables(index, primary);
}
p.add(FAM, regular_qualifer, ts, value2);
primary.put(p);
primary.flushCommits();
// NOTE(review): the same Put object is reused and re-put after adding another
// cell; the second put resends the earlier cells too — presumably intentional
// for this test, confirm.
p.add(FAM, indexed_qualifer, ts, value3);
primary.put(p);
primary.flushCommits();
/** * Test that a bunch of puts with a single timestamp across all the puts builds and inserts index * entries as expected * @throws Exception on failure */ @Test public void testSimpleTimestampedUpdates() throws Exception { HTable primary = createSetupTables(fam1); // do a put to the primary table Put p = new Put(row1); long ts = 10; p.add(FAM, indexed_qualifer, ts, value1); p.add(FAM, regular_qualifer, ts, value2); primary.put(p); primary.flushCommits(); // read the index for the expected values HTable index1 = new HTable(UTIL.getConfiguration(), getIndexTableName()); // build the expected kvs List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>(); pairs.add(new Pair<byte[], CoveredColumn>(value1, col1)); pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2)); List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts, pairs); // verify that the index matches IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts, value1); // cleanup closeAndCleanupTables(primary, index1); }
p.add(FAM, indexed_qualifer, ts, value1);
primary.put(p);
primary.flushCommits();
// NOTE(review): the Put is reused for a second write at a later timestamp
// (ts2); the earlier ts cell is resent as well — presumably intentional, confirm.
p.add(FAM, indexed_qualifer, ts2, value2);
primary.put(p);
primary.flushCommits();
// Push any buffered writes to the server before the test continues.
primary.flushCommits();
p.add(FAM2, indexed_qualifer, ts1, value1);
primary.put(p);
// NOTE(review): flushCommits() is invoked four times in a row; calls after the
// first flush an empty buffer — presumably leftover duplicates, confirm before
// removing.
primary.flushCommits();
primary.flushCommits();
primary.flushCommits();
primary.flushCommits();
p.add(FAM2, indexed_qualifer, ts2, value2);
primary.put(p);
primary.flushCommits();
primary.put(p);
// The flush — not the buffered put — is where the client surfaces server-side
// write failures, so the expected exception is asserted around flushCommits().
try {
    primary.flushCommits();
    fail("Shouldn't have gotten a successful write to the primary table");
} catch (RetriesExhaustedWithDetailsException e) {
// Add another cell in the second family and flush it to the server.
p.add(FAM2, indexed_qualifer, ts, value3);
primary.put(p);
primary.flushCommits();
// Each put/flush pair below reuses the same Put object, accumulating cells at
// successive timestamps — later puts resend earlier cells too; presumably
// intentional for exercising timestamped index updates, confirm.
p.add(FAM2, indexed_qualifer, ts1, v1_1);
primary.put(p);
primary.flushCommits();
p.add(FAM2, indexed_qualifer, ts3, v3_1);
primary.put(p);
primary.flushCommits();
p.add(FAM2, indexed_qualifer, ts5, v5_1);
primary.put(p);
primary.flushCommits();
p.add(FAM2, indexed_qualifer, ts6, v6_1);
primary.put(p);
primary.flushCommits();
p.add(FAM, indexed_qualifer, ts2, value2);
primary.put(p);
primary.flushCommits();
// Write a non-indexed cell and flush it to the server.
p.add(FAM2, regular_qualifer, ts2, value3);
primary.put(p);
primary.flushCommits();
// Push any buffered writes to the server before the test continues.
primary.flushCommits();
HTable primary = new HTable(conf, testTable.getTableName());
primary.put(p);
primary.flushCommits();
// A put touching only a non-indexed family should not create index entries.
p2.add(nonIndexedFamily, Bytes.toBytes("Not indexed"), Bytes.toBytes("non-indexed value"));
primary.put(p2);
primary.flushCommits();
// Delete only the indexed column, flush, then verify the index still shows the
// expected entry at the earlier timestamp ts.
d.deleteColumns(FAM, indexed_qualifer);
primary.delete(d);
primary.flushCommits();
IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts, value1);
/**
 * Persists one row of column/value pairs under the queue-entry column family
 * and flushes the write to HBase. A row with no values is skipped entirely.
 *
 * @param row    row key to write under
 * @param values column-qualifier to value map; may be empty (no-op)
 * @throws IOException if the HBase put or flush fails
 */
@Override
protected void store(byte[] row, Map<byte[], byte[]> values) throws IOException {
    // Nothing to write — avoid sending an empty Put.
    if (values.isEmpty()) {
        return;
    }
    Put rowPut = new Put(row);
    for (Map.Entry<byte[], byte[]> column : values.entrySet()) {
        rowPut.add(QueueEntryRow.COLUMN_FAMILY, column.getKey(), column.getValue());
    }
    hTable.put(rowPut);
    // Flush immediately so the row is durable on the server before returning.
    hTable.flushCommits();
}