/**
 * Implements the WriteFailureListener interface. If the failure was caused by
 * the host becoming unavailable, the batch is retried; any other failure is
 * left to stand.
 *
 * @param batch the batch of WriteEvents that failed
 * @param throwable the exception that caused the failure
 */
public void processFailure(WriteBatch batch, Throwable throwable) {
  boolean isHostUnavailableException =
    processException(batch.getBatcher(), throwable, batch.getClient().getHost());
  if ( isHostUnavailableException ) {
    try {
      logger.warn("Retrying failed batch: {}, results so far: {}, uris: {}",
        batch.getJobBatchNumber(), batch.getJobWritesSoFar(),
        Stream.of(batch.getItems()).map(WriteEvent::getTargetUri).collect(Collectors.toList()));
      batch.getBatcher().retryWithFailureListeners(batch);
    } catch (RuntimeException e) {
      // A failed retry is itself a failure: log it, then re-enter this
      // listener so a still-unavailable host can trigger another retry.
      logger.error("Exception during retry", e);
      processFailure(batch, e);
    }
  }
}
/**
 * Retries a failed batch by writing its items again in a new transaction on
 * the calling thread.
 *
 * @param batch the failed batch to write again; must not be null
 * @param callFailListeners when false, a failure is rethrown to the caller
 *        instead of being dispatched to the registered failure listeners
 * @throws IllegalArgumentException if batch is null
 */
private void retry(WriteBatch batch, boolean callFailListeners) {
  if ( isStopped() ) {
    logger.warn("Job is now stopped, aborting the retry");
    return;
  }
  if ( batch == null ) throw new IllegalArgumentException("batch must not be null");
  boolean forceNewTransaction = true;
  BatchWriteSet writeSet = newBatchWriteSet(forceNewTransaction, batch.getJobBatchNumber());
  if ( !callFailListeners ) {
    // Replace listener dispatch with a direct rethrow so the caller sees the
    // failure immediately instead of the registered listeners.
    writeSet.onFailure(throwable -> {
      if ( throwable instanceof RuntimeException ) throw (RuntimeException) throwable;
      else throw new DataMovementException("Failed to retry batch", throwable);
    });
  }
  for ( WriteEvent doc : batch.getItems() ) {
    writeSet.getWriteSet().add(doc.getTargetUri(), doc.getMetadata(), doc.getContent());
  }
  // Run synchronously on this thread rather than submitting to a thread pool.
  BatchWriter runnable = new BatchWriter(writeSet);
  runnable.run();
}
@Override
// Success listener for testAddMultiThreadedSuccess_Issue48: logs the batch
// number and item count, then iterates the batch's WriteEvents.
// NOTE(review): this fragment is truncated mid-lambda (the for-loop body and
// the closing braces are cut off) — confirm against the full test source.
.onBatchSuccess( batch -> { logger.debug("[testAddMultiThreadedSuccess_Issue48] batch: {}, items: {}", batch.getJobBatchNumber(), batch.getItems().length); for(WriteEvent w:batch.getItems()){
// NOTE(review): this fragment appears corrupted during extraction — a string
// concatenation runs directly into an `if` statement (`... + if (`), and what
// look like pieces of a success listener, an InterruptedIOException check, and
// a failure listener from testStopBeforeFlush_Issue595 are fused onto one
// line. Recover the original statements from the full test source before
// editing; the code below is preserved byte-for-byte.
logger.debug("[testStopBeforeFlush_Issue595] batch: " + batch.getJobBatchNumber() + ", items: " + batch.getItems().length + ", writes so far: " + batch.getJobWritesSoFar() + if ( cause instanceof InterruptedIOException ) { logger.debug("An expected InterruptedIOException occurred because the job was stopped prematurely" + ", batch: " + batch.getJobBatchNumber() + ", writes so far: " + batch.getJobWritesSoFar() + ", host: " + batch.getClient().getHost() + logger.debug("[testStopBeforeFlush_Issue595] Failed Batch: batch: " + batch.getJobBatchNumber() + ", batch: " + batch.getJobBatchNumber() + ", writes so far: " + batch.getJobWritesSoFar() + ", host: " + batch.getClient().getHost() +
// Success listener for testAddMultiThreadedSuccess_Issue61: logs the batch
// number and item count, then iterates the batch's WriteEvents.
// NOTE(review): truncated mid-lambda (for-loop body cut off) — confirm
// against the full test source.
batch -> { logger.debug("[testAddMultiThreadedSuccess_Issue61] batch: {}, items: {}", batch.getJobBatchNumber(), batch.getItems().length); for(WriteEvent w:batch.getItems()){
// Success listener for a parameterized write test: logs batch info, counts
// the successful batch, and appends an error message to `failures` when the
// batch-number check fails.
// NOTE(review): truncated and likely garbled — the identical `if` block
// appears twice (possibly success- and failure-listener fragments fused), and
// the condition compares getJobBatchNumber() against expectedBatches while
// the message talks about expectedBatchSize items; verify against the full
// test source.
batch -> { logger.debug("[testWrites_{}] batch: {}, items: {}", testName, batch.getJobBatchNumber(), batch.getItems().length); successfulBatchCount.incrementAndGet(); for ( WriteEvent event : batch.getItems() ) { if ( batch.getJobBatchNumber() != expectedBatches ) { failures.append("ERROR: There should be " + expectedBatchSize + " items in batch " + batch.getJobBatchNumber() + " but there are " + batch.getItems().length); if ( batch.getJobBatchNumber() != expectedBatches ) { failures.append("ERROR: There should be " + expectedBatchSize + " items in batch " + batch.getJobBatchNumber() + " but there are " + batch.getItems().length);
// Success listener for testMultipleFlushAnStop_Issue109: logs the batch
// number and item count, then iterates the batch's WriteEvents.
// NOTE(review): truncated mid-lambda (for-loop body cut off) — confirm
// against the full test source.
batch -> { logger.debug("[testMultipleFlushAnStop_Issue109] batch: {}, items: {}", batch.getJobBatchNumber(), batch.getItems().length); for(WriteEvent w:batch.getItems()){
// Success listener for testSimple: logs batch info and records each written
// URI (colon-separated) into the `successBatch` accumulator.
// NOTE(review): truncated mid-lambda (closing braces cut off) — confirm
// against the full test source.
batch -> { logger.debug("[testSimple] batch: {}, items: {}", batch.getJobBatchNumber(), batch.getItems().length); for(WriteEvent w: batch.getItems()){ successBatch.append(w.getTargetUri()+":");
// Listener fragments for testWrites: each records that it ran and appends an
// error to `failures` when the batch does not contain exactly 2 items.
// NOTE(review): this line fuses what appear to be a success listener
// (successListenerWasRun) and a failure listener (failListenerWasRun) with
// identical size checks; the boundary between the two lambdas was lost in
// extraction — verify against the full test source.
batch -> { logger.debug("[testWrites] batch: {}, items: {}", batch.getJobBatchNumber(), batch.getItems().length); successListenerWasRun.append("true"); if ( 2 != batch.getItems().length) { failures.append("ERROR: There should be 2 items in batch " + batch.getJobBatchNumber() + " but there are " + batch.getItems().length); failListenerWasRun.append("true"); if ( 2 != batch.getItems().length) { failures.append("ERROR: There should be 2 items in batch " + batch.getJobBatchNumber() + " but there are " + batch.getItems().length);
// Success listener for testUndeclaredFormat_Issue60: logs batch info and
// bumps `successfulCount` once per WriteEvent in the batch.
// NOTE(review): truncated mid-lambda (loop body continues beyond this
// fragment) — confirm against the full test source.
batch -> { logger.debug("[testUndeclaredFormat_Issue60] batch: {}, items: {}", batch.getJobBatchNumber(), batch.getItems().length); for(WriteEvent w:batch.getItems()){ successfulCount.incrementAndGet();
@Test public void testWriteBatcher() { assertEquals(null, client.newDocumentManager().exists("doc1.txt")); assertEquals(null, client.newDocumentManager().exists("doc2.txt")); // begin copy from "Using WriteBatcher" in src/main/java/com/marklogic/datamovement/package-info.java WriteBatcher whb = dataMovementManager.newWriteBatcher() .withBatchSize(100) .withThreadCount(20) .onBatchSuccess(batch -> { logger.debug("batch # {}, so far: {}", batch.getJobBatchNumber(), batch.getJobWritesSoFar()); }) .onBatchFailure((batch,throwable) -> throwable.printStackTrace() ); JobTicket ticket = dataMovementManager.startJob(whb); // the add or addAs methods could be called in separate threads on the // single whb instance whb.add ("doc1.txt", new StringHandle("doc1 contents")); whb.addAs("doc2.txt", "doc2 contents"); whb.flushAndWait(); // send the two docs even though they're not a full batch dataMovementManager.stopJob(ticket); // end copy from "Using WriteBatcher" in src/main/java/com/marklogic/datamovement/package-info.java assertTrue(null != client.newDocumentManager().exists("doc1.txt")); assertTrue(null != client.newDocumentManager().exists("doc2.txt")); } }