@Test public void testCancelPeriodicFlush() throws InterruptedException, ExecutionException { Put put = new Put(Bytes.toBytes(0)).addColumn(CF, CQ, VALUE); try (AsyncBufferedMutatorImpl mutator = (AsyncBufferedMutatorImpl) CONN .getBufferedMutatorBuilder(TABLE_NAME).setWriteBufferPeriodicFlush(1, TimeUnit.SECONDS) .setWriteBufferSize(10 * put.heapSize()).build()) { List<CompletableFuture<?>> futures = new ArrayList<>(); futures.add(mutator.mutate(put)); Timeout task = mutator.periodicFlushTask; // we should have scheduled a periodic flush task assertNotNull(task); for (int i = 1;; i++) { futures.add(mutator.mutate(new Put(Bytes.toBytes(i)).addColumn(CF, CQ, VALUE))); if (mutator.periodicFlushTask == null) { break; } } assertTrue(task.isCancelled()); CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).join(); AsyncTable<?> table = CONN.getTable(TABLE_NAME); for (int i = 0; i < futures.size(); i++) { assertArrayEquals(VALUE, table.get(new Get(Bytes.toBytes(i))).get().getValue(CF, CQ)); } } }
// NOTE(review): fragment — the enclosing test method's start and end are not visible here.
// Opens a test mutator with a 200 ms periodic flush interval and a write buffer sized for
// roughly ten puts, buffers one mutation, and captures the scheduled flush task; presumably
// the task is asserted on later in the method — confirm against the full source.
try (AsyncBufferMutatorForTest mutator = new AsyncBufferMutatorForTest(AsyncConnectionImpl.RETRY_TIMER, CONN.getTable(TABLE_NAME), 10 * put.heapSize(), TimeUnit.MILLISECONDS.toNanos(200))) { CompletableFuture<?> future = mutator.mutate(put); Timeout task = mutator.periodicFlushTask;
private int comparePuts(Put p1, Put p2) { int p1Size = p1.size(); int p2Size = p2.size(); int compare = p1Size - p2Size; if (compare == 0) { // TODO: make this a real comparison // this is a little cheating, but we don't really need to worry too much about this being // the same - chances are that exact matches here are really the same update. return Longs.compare(p1.heapSize(), p2.heapSize()); } return compare; }
private int comparePuts(Put p1, Put p2) { int p1Size = p1.size(); int p2Size = p2.size(); int compare = p1Size - p2Size; if (compare == 0) { // TODO: make this a real comparison // this is a little cheating, but we don't really need to worry too much about this being // the same - chances are that exact matches here are really the same update. return Longs.compare(p1.heapSize(), p2.heapSize()); } return compare; }
private int comparePuts(Put p1, Put p2) { int p1Size = p1.size(); int p2Size = p2.size(); int compare = p1Size - p2Size; if (compare == 0) { // TODO: make this a real comparison // this is a little cheating, but we don't really need to worry too much about this being // the same - chances are that exact matches here are really the same update. return Longs.compare(p1.heapSize(), p2.heapSize()); } return compare; }
private int comparePuts(Put p1, Put p2) { int p1Size = p1.size(); int p2Size = p2.size(); int compare = p1Size - p2Size; if (compare == 0) { // TODO: make this a real comparison // this is a little cheating, but we don't really need to worry too much about this being // the same - chances are that exact matches here are really the same update. return Longs.compare(p1.heapSize(), p2.heapSize()); } return compare; }
// NOTE(review): fragment — begins mid-statement (tail of a state-check message format) and is
// truncated at the end; the enclosing method is not visible here. The visible code merges a new
// cell into an already-buffered Put for the same entity (re-accounting its heap size before and
// after the add), or buffers a fresh Put keyed by entity and grows mCurrentWriteBufferSize —
// presumably to drive a size-based flush elsewhere; confirm against the full source.
"Cannot write to BufferedWriter instance in state %s.", mState); if (mPutBuffer.containsKey(entityId)) { mCurrentWriteBufferSize -= mPutBuffer.get(entityId).heapSize(); mPutBuffer.get(entityId).add(family, qualifier, timestamp, value); mCurrentWriteBufferSize += mPutBuffer.get(entityId).heapSize(); } else { final Put put = new Put(entityId.getHBaseRowKey()) .add(family, qualifier, timestamp, value); mPutBuffer.put(entityId, put); mCurrentWriteBufferSize += put.heapSize();
// NOTE(review): single-statement fragment of a larger method — adds the newly buffered Put's
// heap footprint to the running write-buffer size counter.
currentWriteBufferSize += aPut.heapSize();
private void doPut(final List<Put> puts) throws IOException { int n = 0; for (Put put : puts) { validatePut(put); writeBuffer.add(put); currentWriteBufferSize += put.heapSize(); // we need to periodically see if the writebuffer is full instead of waiting until the end of the List n++; if (n % DOPUT_WB_CHECK == 0 && currentWriteBufferSize > writeBufferSize) { flushCommits(); } } if (autoFlush || currentWriteBufferSize > writeBufferSize) { flushCommits(); } }
@Test public void testCancelPeriodicFlush() throws InterruptedException, ExecutionException { Put put = new Put(Bytes.toBytes(0)).addColumn(CF, CQ, VALUE); try (AsyncBufferedMutatorImpl mutator = (AsyncBufferedMutatorImpl) CONN .getBufferedMutatorBuilder(TABLE_NAME).setWriteBufferPeriodicFlush(1, TimeUnit.SECONDS) .setWriteBufferSize(10 * put.heapSize()).build()) { List<CompletableFuture<?>> futures = new ArrayList<>(); futures.add(mutator.mutate(put)); Timeout task = mutator.periodicFlushTask; // we should have scheduled a periodic flush task assertNotNull(task); for (int i = 1;; i++) { futures.add(mutator.mutate(new Put(Bytes.toBytes(i)).addColumn(CF, CQ, VALUE))); if (mutator.periodicFlushTask == null) { break; } } assertTrue(task.isCancelled()); CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).join(); AsyncTable<?> table = CONN.getTable(TABLE_NAME); for (int i = 0; i < futures.size(); i++) { assertArrayEquals(VALUE, table.get(new Get(Bytes.toBytes(i))).get().getValue(CF, CQ)); } } }
// NOTE(review): fragment — the enclosing test method's start and end are not visible here.
// Opens a test mutator with a 200 ms periodic flush interval and a write buffer sized for
// roughly ten puts, buffers one mutation, and captures the scheduled flush task; presumably
// the task is asserted on later in the method — confirm against the full source.
try (AsyncBufferMutatorForTest mutator = new AsyncBufferMutatorForTest(AsyncConnectionImpl.RETRY_TIMER, CONN.getTable(TABLE_NAME), 10 * put.heapSize(), TimeUnit.MILLISECONDS.toNanos(200))) { CompletableFuture<?> future = mutator.mutate(put); Timeout task = mutator.periodicFlushTask;
@Test @Ignore(value="We need a better test now that BigtableBufferedMutator has different logic") public void testBufferSizeFlush() throws Exception { int maxSize = 1024; BufferedMutatorParams params = new BufferedMutatorParams(sharedTestEnv.getDefaultTableName()) .writeBufferSize(maxSize); try (BufferedMutator mutator = getConnection().getBufferedMutator(params)) { // HBase 1.0.0 has a bug in it. It returns maxSize instead of the buffer size for // getWriteBufferSize. https://issues.apache.org/jira/browse/HBASE-13113 Assert.assertTrue( 0 == mutator.getWriteBufferSize() || maxSize == mutator.getWriteBufferSize()); Put put = getPut(); mutator.mutate(put); Assert.assertTrue(mutator.getWriteBufferSize() > 0); Put largePut = new Put(dataHelper.randomData("testrow-")); largePut.addColumn(COLUMN_FAMILY, qualifier, Bytes.toBytes(RandomStringUtils.randomAlphanumeric(maxSize * 2))); long heapSize = largePut.heapSize(); Assert.assertTrue("largePut heapsize is : " + heapSize, heapSize > maxSize); mutator.mutate(largePut); // HBase 1.0.0 has a bug in it. It returns maxSize instead of the buffer size for // getWriteBufferSize. https://issues.apache.org/jira/browse/HBASE-13113 Assert.assertTrue( 0 == mutator.getWriteBufferSize() || maxSize == mutator.getWriteBufferSize()); } }