// Convenience overload: apply a batch with no nonces. Nonces matter only for
// idempotency tracking of retried operations; plain puts/deletes don't need them,
// so this simply delegates to the nonce-taking overload with NO_NONCE for both.
@Override public OperationStatus[] batchMutate(Mutation[] mutations) throws IOException {
  return batchMutate(mutations, HConstants.NO_NONCE, HConstants.NO_NONCE);
}
// Applies a batch of mutations, optionally atomically, carrying the caller's
// nonce for idempotency tracking. Known callers of this overload:
//  * batchMutate with a single mutation - put/delete, separate or from checkAndMutate.
//  * coprocessor calls (see e.g. BulkDeleteEndpoint).
// So nonces are not really ever used by HBase itself on this path; they could be
// used by coprocessors, and by checkAnd... style operations.
public OperationStatus[] batchMutate(Mutation[] mutations, boolean atomic, long nonceGroup,
    long nonce) throws IOException {
  return batchMutate(new MutationBatchOperation(this, mutations, atomic, nonceGroup, nonce));
}
@Override public void mutateRow(RowMutations rm) throws IOException { // Don't need nonces here - RowMutations only supports puts and deletes final List<Mutation> m = rm.getMutations(); batchMutate(m.toArray(new Mutation[m.size()]), true, HConstants.NO_NONCE, HConstants.NO_NONCE); }
@Override public void doWork() throws IOException {
  try {
    // Push the prepared batch through the region; a failure is stored for the
    // coordinating test thread to assert on instead of escaping this worker.
    region.batchMutate(finalBatchOp);
  } catch (IOException ioe) {
    LOG.error("test failed!", ioe);
    retFromThread.set(ioe);
  }
  // Signal the coordinating thread that this batch attempt has completed,
  // whether it succeeded or not.
  finishedPuts.countDown();
}
};
@Override public void doWork() throws IOException {
  // Announce that the puts are starting, then record the per-operation
  // statuses so the main test thread can inspect them.
  startingPuts.countDown();
  retFromThread.set(region.batchMutate(puts));
}
};
// Atomically applies the given mutations while additionally holding the supplied
// row locks, carrying the caller's nonce. Delegates to batchMutate with an
// anonymous MutationBatchOperation subclass.
// NOTE(review): excerpt is cut mid-anonymous-class; rowsToLock is presumably
// consumed by the overridden member that continues beyond this view — confirm
// against the full method body.
public void mutateRowsWithLocks(Collection<Mutation> mutations, Collection<byte[]> rowsToLock,
    long nonceGroup, long nonce) throws IOException {
  batchMutate(new MutationBatchOperation(this, mutations.toArray(new Mutation[mutations.size()]),
      true, nonceGroup, nonce) {
    @Override
@Override
public void run() {
  try {
    // Issue 100 single-put batches against the region. Each iteration waits on
    // the shared latch so concurrent writers proceed together, then paces
    // itself with a short sleep.
    for (int i = 0; i < 100; i++) {
      byte[] row = Bytes.toBytes("putRow" + i);
      Put put = new Put(row);
      // Fix: use Bytes.toBytes("cf") instead of "cf".getBytes() — String.getBytes()
      // without a charset depends on the platform default encoding, whereas the
      // HBase convention (used everywhere else on this line) is UTF-8 via Bytes.
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes(0), Bytes.toBytes(""));
      latch.await();
      region.batchMutate(new Mutation[] { put });
      Thread.sleep(10);
    }
  } catch (Throwable t) {
    // Fix: corrected typo in the log message ("happend" -> "happened").
    LOG.warn("Error happened when Increment: ", t);
  }
}
}
/**
 * Replays a batch of recovered mutations tagged with the given replay sequence id.
 * <p>
 * On a secondary (non-default) region replica, replayed edits can arrive out of
 * order relative to a region-open event. Any batch whose replaySeqId is older
 * than {@code lastReplayedOpenRegionSeqId} is skipped silently — presumably its
 * effect is already covered by the reopened region state (TODO confirm) — and
 * every mutation in it is reported as SUCCESS so callers treat it as handled.
 *
 * @param mutations the recovered mutations to apply
 * @param replaySeqId the sequence id these mutations were originally written under
 * @return per-mutation statuses; all SUCCESS when the whole batch is skipped
 * @throws IOException if applying the batch fails
 */
public OperationStatus[] batchReplay(MutationReplay[] mutations, long replaySeqId)
    throws IOException {
  if (!RegionReplicaUtil.isDefaultReplica(getRegionInfo())
      && replaySeqId < lastReplayedOpenRegionSeqId) {
    // if it is a secondary replica we should ignore these entries silently
    // since they are coming out of order
    if (LOG.isTraceEnabled()) {
      LOG.trace(getRegionInfo().getEncodedName() + " : " + "Skipping " + mutations.length
          + " mutations with replaySeqId=" + replaySeqId
          + " which is < than lastReplayedOpenRegionSeqId=" + lastReplayedOpenRegionSeqId);
      for (MutationReplay mut : mutations) {
        LOG.trace(getRegionInfo().getEncodedName() + " : Skipping : " + mut.mutation);
      }
    }
    // Report every skipped mutation as SUCCESS rather than surfacing an error.
    OperationStatus[] statuses = new OperationStatus[mutations.length];
    for (int i = 0; i < statuses.length; i++) {
      statuses[i] = OperationStatus.SUCCESS;
    }
    return statuses;
  }
  return batchMutate(new ReplayBatchOperation(this, mutations, replaySeqId));
}
// NOTE(review): garbled excerpt — the leading "return false;" cannot precede
// live statements in the same block; lines from different spots of the original
// method appear collapsed here. Reconcile against the full source before editing.
return false;
// Apply the accumulated puts to the meta region without nonces, then close it.
meta.batchMutate(puts.toArray(new Put[puts.size()]), HConstants.NO_NONCE, HConstants.NO_NONCE);
meta.close();
if (meta.getWAL() != null) {
@Override public void run() {
  byte[] value = new byte[100];
  Put[] in = new Put[1];
  // iterate for the specified number of operations
  for (int i=0; i<numOps; i++) {
    // generate random bytes
    rand.nextBytes(value);
    // put the randombytes and verify that we can read it. This is one
    // way of ensuring that rwcc manipulation in HRegion.put() is fine.
    Put put = new Put(rowkey);
    put.addColumn(fam1, qual1, value);
    in[0] = put;
    try {
      OperationStatus[] ret = region.batchMutate(in);
      assertEquals(1, ret.length);
      assertEquals(OperationStatusCode.SUCCESS, ret[0].getOperationStatusCode());
      assertGet(this.region, rowkey, fam1, qual1, value);
    } catch (IOException e) {
      // NOTE(review): assertTrue(msg, false) is an anti-idiom — fail(msg) would be
      // clearer. Left byte-identical here; consider cleaning up separately.
      assertTrue("Thread id " + threadNumber + " operation " + i + " failed.", false);
    }
  }
}
}
private void doBatchMutate(Mutation mutation) throws IOException { // Currently this is only called for puts and deletes, so no nonces. OperationStatus[] batchMutate = this.batchMutate(new Mutation[]{mutation}); if (batchMutate[0].getOperationStatusCode().equals(OperationStatusCode.SANITY_CHECK_FAILURE)) { throw new FailedSanityCheckException(batchMutate[0].getExceptionMsg()); } else if (batchMutate[0].getOperationStatusCode().equals(OperationStatusCode.BAD_FAMILY)) { throw new NoSuchColumnFamilyException(batchMutate[0].getExceptionMsg()); } else if (batchMutate[0].getOperationStatusCode().equals(OperationStatusCode.STORE_TOO_BUSY)) { throw new RegionTooBusyException(batchMutate[0].getExceptionMsg()); } }
@Test public void testBatchPutWithTsSlop() throws Exception { // add data with a timestamp that is too recent for range. Ensure assert CONF.setInt("hbase.hregion.keyvalue.timestamp.slop.millisecs", 1000); final Put[] puts = new Put[10]; MetricsWALSource source = CompatibilitySingletonFactory.getInstance(MetricsWALSource.class); long syncs = prepareRegionForBachPut(puts, source, true); OperationStatus[] codes = this.region.batchMutate(puts); assertEquals(10, codes.length); for (int i = 0; i < 10; i++) { assertEquals(OperationStatusCode.SANITY_CHECK_FAILURE, codes[i].getOperationStatusCode()); } metricsAssertHelper.assertCounter("syncTimeNumOps", syncs, source); }
@Test
public void testBatchPut_whileNoRowLocksHeld() throws IOException {
  final Put[] puts = new Put[10];
  MetricsWALSource source = CompatibilitySingletonFactory.getInstance(MetricsWALSource.class);
  long syncs = prepareRegionForBachPut(puts, source, false);
  // First batch: every put is valid, so all succeed with exactly one WAL sync.
  OperationStatus[] codes = this.region.batchMutate(puts);
  assertEquals(10, codes.length);
  for (OperationStatus code : codes) {
    assertEquals(OperationStatusCode.SUCCESS, code.getOperationStatusCode());
  }
  metricsAssertHelper.assertCounter("syncTimeNumOps", syncs + 1, source);
  LOG.info("Next a batch put with one invalid family");
  // Poison put #5 with a nonexistent column family; only that entry should
  // come back as BAD_FAMILY while the rest still succeed.
  puts[5].addColumn(Bytes.toBytes("BAD_CF"), qual, value);
  codes = this.region.batchMutate(puts);
  assertEquals(10, codes.length);
  for (int idx = 0; idx < 10; idx++) {
    OperationStatusCode expected =
        (idx == 5) ? OperationStatusCode.BAD_FAMILY : OperationStatusCode.SUCCESS;
    assertEquals(expected, codes[idx].getOperationStatusCode());
  }
  metricsAssertHelper.assertCounter("syncTimeNumOps", syncs + 2, source);
}
// NOTE(review): excerpt — seeds the region with a single put (no nonces) before
// constructing the multithreaded test context; surrounding setup is outside this view.
puts[0] = put;
region.batchMutate(puts, HConstants.NO_NONCE, HConstants.NO_NONCE);
MultithreadedTestUtil.TestContext ctx = new MultithreadedTestUtil.TestContext(conf);
// NOTE(review): garbled excerpt — the stray "HConstants.NO_NONCE);" argument tail
// and the dangling for-loop header indicate lines from more than one call site were
// collapsed here; reconcile against the original test before editing.
OperationStatus[] codes = this.region.batchMutate(batchOp);
assertEquals(10, codes.length);
for (int i = 0; i < 10; i++) {
HConstants.NO_NONCE);
// Expect the follow-up batch to fail with an unknown column family.
thrown.expect(NoSuchColumnFamilyException.class);
this.region.batchMutate(batchOp);
};
// NOTE(review): excerpt — the first mutation is expected to pass and the second
// to be rejected by sanity checks, per the assertions below.
OperationStatus[] status = region.batchMutate(mutations);
assertEquals(OperationStatusCode.SUCCESS, status[0].getOperationStatusCode());
assertEquals(OperationStatusCode.SANITY_CHECK_FAILURE, status[1].getOperationStatusCode());
// NOTE(review): excerpt — batch-mutates with an explicit atomicity flag and no
// nonces, then iterates the per-operation statuses (loop body not visible here).
OperationStatus[] codes = region.batchMutate(mArray, atomic, HConstants.NO_NONCE,
    HConstants.NO_NONCE);
for (i = 0; i < codes.length; i++) {
/**
 * Tags each mutation with the given index-maintenance UUID (when present) and
 * applies the whole list to the region in a single batch.
 *
 * @param region the target region
 * @param mutations mutation / lock-id pairs to commit
 * @param indexUUID index UUID attribute value, or null to skip tagging
 * @throws IOException if the region rejects the batch
 */
private static void commitBatch(HRegion region, List<Pair<Mutation,Integer>> mutations,
    byte[] indexUUID) throws IOException {
  if (indexUUID != null) {
    for (Pair<Mutation,Integer> pair : mutations) {
      pair.getFirst().setAttribute(PhoenixIndexCodec.INDEX_UUID, indexUUID);
    }
  }
  // Generic array creation is illegal in Java, hence the raw Pair[] with a
  // suppression scoped to this one local.
  @SuppressWarnings("unchecked")
  Pair<Mutation,Integer>[] mutationArray = new Pair[mutations.size()];
  // TODO: should we use the one that is all or none?
  region.batchMutate(mutations.toArray(mutationArray));
}
// NOTE(review): excerpt — wraps the mutation with its row-lock id and applies it
// as a single-element batch (older Pair-based batchMutate API), then reads the
// client timestamp back off the mutation.
Pair<Mutation,Integer>[] mutations = new Pair[1];
mutations[0] = new Pair<Mutation,Integer>(m, lid);
region.batchMutate(mutations);
long serverTimestamp = MetaDataUtil.getClientTimeStamp(m);
// NOTE(review): excerpt — applies a single put under the held lock id, then returns
// the replacement current-value KV for the sequence; the enclosing try/finally is
// only partially visible here.
Pair<Mutation,Integer>[] mutations = new Pair[1];
mutations[0] = new Pair<Mutation,Integer>(put, lid);
region.batchMutate(mutations);
return Sequence.replaceCurrentValueKV(result, newCurrentValueKV);
} finally {