@Override public void retryListener(QueryBatch batch, QueryBatchListener queryBatchListener) { // We get the batch and modify the client alone in order to make use // of the new forest client in case if the original host is unavailable. DatabaseClient client = null; Forest[] forests = batch.getBatcher().getForestConfig().listForests(); for(Forest forest : forests) { if(forest.equals(batch.getForest())) client = getMoveMgr().getForestClient(forest); } QueryBatchImpl retryBatch = new QueryBatchImpl() .withClient( client ) .withBatcher( batch.getBatcher() ) .withTimestamp( batch.getTimestamp() ) .withServerTimestamp( batch.getServerTimestamp() ) .withItems( batch.getItems() ) .withJobTicket( batch.getJobTicket() ) .withJobBatchNumber( batch.getJobBatchNumber() ) .withJobResultsSoFar( batch.getJobResultsSoFar() ) .withForestBatchNumber( batch.getForestBatchNumber() ) .withForestResultsSoFar( batch.getForestResultsSoFar() ) .withForest( batch.getForest() ) .withJobTicket( batch.getJobTicket() ); queryBatchListener.processEvent(retryBatch); }
/**
 * Reports the batch number and cumulative result count, either through the
 * configured logger (at INFO level) or to standard output.
 *
 * @param queryBatch the batch whose progress is reported
 */
@Override
public void processEvent(QueryBatch queryBatch) {
  final String message = String.format(
      "Processed batch number [%d]; job results so far: [%d]",
      queryBatch.getJobBatchNumber(), queryBatch.getJobResultsSoFar());
  if (!useLogger) {
    System.out.println(message);
  } else if (logger.isInfoEnabled()) {
    logger.info(message);
  }
}
/**
 * Handles a listener failure: if the cause is a host-unavailable condition,
 * logs the batch details and asks the batcher to retry the wrapped listener.
 * A retry that itself throws is fed back through this method so it gets the
 * same host-unavailable analysis.
 *
 * @param batch the batch that failed
 * @param throwable the failure raised by the wrapped listener
 */
@Override
public void processFailure(QueryBatch batch, Throwable throwable) {
  // Fixed idiom: compare booleans directly instead of "== true".
  boolean isHostUnavailableException =
      processException(batch.getBatcher(), throwable, batch.getClient().getHost());
  if (isHostUnavailableException) {
    try {
      logger.warn("Retrying failed listener batch: {}, results so far: {}, uris: {}",
          batch.getJobBatchNumber(), batch.getJobResultsSoFar(),
          Arrays.toString(batch.getItems()));
      batch.getBatcher().retryListener(batch, queryBatchListener);
    } catch (RuntimeException e) {
      logger.error("Exception during listener retry", e);
      // Recurse so a failure during the retry is re-evaluated the same way.
      processFailure(batch, e);
    }
  }
}
}
/**
 * Reads the documents for the given batch's URIs, applying any configured
 * search view, metadata categories, non-document format, and transform.
 *
 * @param batch the batch whose URIs should be read
 * @return a page of documents for the batch's items
 */
protected DocumentPage getDocs(QueryBatch batch) {
  GenericDocumentManager docMgr = batch.getClient().newDocumentManager();
  // Apply only the options that were explicitly configured.
  if (view != null) docMgr.setSearchView(view);
  if (categories != null) docMgr.setMetadataCategories(categories);
  if (nonDocumentFormat != null) docMgr.setNonDocumentFormat(nonDocumentFormat);
  // Fixed idiom: test the boolean directly instead of "== true".
  if (consistentSnapshot) {
    // Read at the batch's server timestamp for a point-in-time view.
    return ((GenericDocumentImpl) docMgr).read(
        batch.getServerTimestamp(), transform, batch.getItems());
  }
  return docMgr.read(transform, batch.getItems());
}
@Override public void processEvent(QueryBatch batch) { // Increment the number of batches that succeeded successBatchesCount.incrementAndGet(); // Get the number of documents that have been read successfully successEventsCount.addAndGet(batch.getItems().length); }
batch -> { successfulBatchCount.incrementAndGet(); totalResults.addAndGet(batch.getItems().length); if(queryBatcherChecks) { String forestName = batch.getForest().getForestName(); for ( String uri : batch.getItems() ) { matches.add(uri); batchDatabaseName.set(batch.getForest().getDatabaseName()); batchTicket.set(batch.getJobTicket()); batchTimestamp.set(batch.getTimestamp());
batch -> { ArrayNode uris = mapper.createArrayNode(); for ( String uri : batch.getItems() ) { uris.add( uri ); batch.getClient().newServerEval() .modulePath("/ext" + directory + "filterUrisReferencedByHolds.sjs") .addVariable("uris", new JacksonHandle(uris)) .evalAs(String.class); logger.info("DEBUG: [LegalHoldsTest] urisToDelete =[" + urisToDelete + "]"); logger.debug("DEBUG: [LegalHoldsTest] batch.getForest().getForestName()=[" + batch.getForest().getForestName() + "]"); batch.getClient().newDocumentManager().delete(urisToDelete.split(",")); for ( String uri : urisToDelete.split(",") ) { logger.info("DEBUG: [LegalHoldsTest] uri =[" + uri + "]"); synchronized (urisDeleted) { AtomicInteger deleted = urisDeleted.get(batch.getForest().getForestName()); if ( deleted == null ) { deleted = new AtomicInteger(); urisDeleted.put(batch.getForest().getForestName(), deleted);
/**
 * Reports whether the job has processed at least the expected total number
 * of results. When no expected total was supplied ({@code totalResults <= 0}),
 * completion cannot be determined and this always returns {@code false}.
 *
 * @return true when an expected total exists and has been reached
 */
@Override
public boolean isComplete() {
  // Fixed idiom: "cond ? x : false" is just "cond && x".
  return totalResults > 0 && queryBatch.getJobResultsSoFar() >= totalResults;
}
/**
 * Determine the File to write to for the given query batch. The filename is
 * the batch's job batch number plus the configured extension, with the
 * optional prefix prepended when one is set.
 *
 * @param queryBatch the batch being exported
 * @param exportDir the directory the file will live in
 * @return the target file for this batch
 */
protected File getFileForBatch(QueryBatch queryBatch, File exportDir) {
  StringBuilder name = new StringBuilder();
  if (filenamePrefix != null) {
    name.append(filenamePrefix);
  }
  name.append(queryBatch.getJobBatchNumber()).append(filenameExtension);
  return new File(exportDir, name.toString());
}
try { synchronized(writer) { for ( String uri : batch.getItems() ) { try { if (prefix != null) writer.write(prefix);
/**
 * Builds a human-readable progress line. Includes the expected total when one
 * is known, and appends a records-per-second rate (rounded to 5 significant
 * digits) once any time has elapsed.
 *
 * @return the formatted progress text
 */
@Override
public String getProgressAsString() {
  final String text = (totalResults > 0)
      ? String.format("Progress: %d of %d; time %fs",
          queryBatch.getJobResultsSoFar(), totalResults, timeSoFarInSeconds)
      : String.format("Progress: %d results so far; time %fs",
          queryBatch.getJobResultsSoFar(), timeSoFarInSeconds);
  // No elapsed time yet: a rate would divide by zero, so return the bare text.
  if (timeSoFarInSeconds <= 0) {
    return text;
  }
  double rate = queryBatch.getJobResultsSoFar() / timeSoFarInSeconds;
  rate = new BigDecimal(rate).round(new MathContext(5)).doubleValue();
  return text + "; " + rate + " records/s";
}
if ( ! (batch.getClient() instanceof DatabaseClientImpl) ) { throw new IllegalStateException("DatabaseClient must be instanceof DatabaseClientImpl"); StringHandle uris = new StringHandle(String.join("\n", batch.getItems())) .withMimetype("text/uri-list"); RESTServices services = ((DatabaseClientImpl) batch.getClient()).getServices(); try { RequestParameters params = new RequestParameters(); ).lines().collect(Collectors.toList()); QueryBatchImpl processedBatch = new QueryBatchImpl() .withClient( batch.getClient() ) .withItems( responseUris.toArray(new String[responseUris.size()]) ) .withTimestamp( batch.getTimestamp() ) .withJobBatchNumber( batch.getJobBatchNumber() ) .withJobResultsSoFar( batch.getJobResultsSoFar() ) .withForestBatchNumber( batch.getForestBatchNumber() ) .withForestResultsSoFar( batch.getForestResultsSoFar() ) .withForest( batch.getForest() ) .withServerTimestamp( batch.getServerTimestamp() ) .withJobTicket( batch.getJobTicket() ); for ( QueryBatchListener listener : successListeners ) { try { List<String> skippedRequestUris = new ArrayList<>(Arrays.asList(batch.getItems())); skippedRequestUris.removeAll( responseUris ); if ( skippedRequestUris.size() > 0 ) { Arrays.asList(batch.getItems()));
/**
 * The standard BatchListener action called by QueryBatcher: deletes every
 * document in the batch. If the delete throws, the failure is fanned out to
 * both generations of failure listeners; each listener call is individually
 * shielded so one misbehaving listener cannot stop the others from running.
 *
 * @param batch the batch whose URIs should be deleted
 */
@Override
public void processEvent(QueryBatch batch) {
  try {
    batch.getClient().newDocumentManager().delete(batch.getItems());
  } catch (Throwable deleteFailure) {
    // Notify the legacy Batch<String> failure listeners first.
    for (BatchFailureListener<Batch<String>> legacyListener : failureListeners) {
      try {
        legacyListener.processFailure(batch, deleteFailure);
      } catch (Throwable t2) {
        logger.error("Exception thrown by an onBatchFailure listener", t2);
      }
    }
    // Then the QueryBatch-typed failure listeners.
    for (BatchFailureListener<QueryBatch> batchListener : queryBatchFailureListeners) {
      try {
        batchListener.processFailure(batch, deleteFailure);
      } catch (Throwable t2) {
        logger.error("Exception thrown by an onFailure listener", t2);
      }
    }
  }
}
public List<String> testQueryExceptions(QueryDefinition query, int expectedSuccesses, int expectedFailures) { QueryBatcher queryBatcher = newQueryBatcher(query) .onUrisReady( batch -> { throw new InternalError(errorMessage); } ) .onQueryFailure( queryThrowable -> { throw new InternalError(errorMessage); } ); testExceptions(queryBatcher, expectedSuccesses, expectedFailures); // collect the uris this time List<String> matchingUris = Collections.synchronizedList(new ArrayList<>()); queryBatcher = newQueryBatcher(query) .onUrisReady( batch -> matchingUris.addAll(Arrays.asList(batch.getItems())) ) .onUrisReady( batch -> { throw new RuntimeException(errorMessage); } ) .onQueryFailure( queryThrowable -> { throw new RuntimeException(errorMessage); } ); testExceptions(queryBatcher, expectedSuccesses, expectedFailures); return matchingUris; }
public void processEvent(QueryBatch batch) { final long jobResultsSoFar = batch.getJobResultsSoFar(); final long newResultsSoFar = this.resultsSoFar.updateAndGet(operand -> jobResultsSoFar > operand ? jobResultsSoFar : operand
private void testListenerException(QueryBatchListener listener) { final AtomicInteger failureBatchCount = new AtomicInteger(); Iterator<String> iterator = Arrays.asList(new String[] {uri1}).iterator(); QueryBatcher queryBatcher = moveMgr.newQueryBatcher(iterator) .onUrisReady( batch -> logger.debug("uri={}", batch.getItems()[0]) ) .onUrisReady(listener) .onQueryFailure( queryThrowable -> failureBatchCount.incrementAndGet() ); moveMgr.startJob(queryBatcher); queryBatcher.awaitCompletion(); moveMgr.stopJob(queryBatcher); // there should be no failure sent to the QueryBatcher onQueryFailure listeners assertEquals(0, failureBatchCount.get()); }
@Test public void batchesInOrder() throws Exception { listener = new ProgressListener(4, consumer); listener.initializeListener(null); // Ensure that the total amount of time will be greater than zero for when it's tested below Thread.sleep(100); listener.processEvent(new FakeQueryBatch(2)); assertFalse(consumer.lastProgressUpdate.isComplete()); listener.processEvent(new FakeQueryBatch(4)); assertTrue(consumer.lastProgressUpdate.isComplete()); assertTrue(consumer.texts.get(0).startsWith("Progress: 2 of 4; time ")); assertTrue(consumer.texts.get(0).contains("records/s")); assertTrue(consumer.texts.get(1).startsWith("Progress: 4 of 4; time ")); assertTrue(consumer.texts.get(1).contains("records/s")); assertEquals(2, consumer.texts.size()); // Verify the values on the ProgressUpdate are correct assertEquals(4, consumer.lastProgressUpdate.getQueryBatch().getJobResultsSoFar()); assertEquals(4, consumer.lastProgressUpdate.getTotalResults()); assertTrue(consumer.lastProgressUpdate.getStartTime() > 0); assertTrue(consumer.lastProgressUpdate.getTimeSoFarInSeconds() > 0); }