/**
 * Runs the given QueryBatcher to completion and verifies that the number of
 * success and failure batch callbacks matches the expected counts.
 *
 * @param queryBatcher the batcher to execute; counting listeners are registered here
 * @param expectedSuccesses expected number of onUrisReady invocations
 * @param expectedFailures expected number of onQueryFailure invocations
 */
public void testExceptions(QueryBatcher queryBatcher, int expectedSuccesses, int expectedFailures) {
    final AtomicInteger successCounter = new AtomicInteger();
    final AtomicInteger failureCounter = new AtomicInteger();
    // Batch size of 1 so every matching URI produces its own batch event.
    queryBatcher.withBatchSize(1)
        .onUrisReady(batch -> successCounter.incrementAndGet())
        .onQueryFailure(failure -> failureCounter.incrementAndGet());
    moveMgr.startJob(queryBatcher);
    queryBatcher.awaitCompletion();
    moveMgr.stopJob(queryBatcher);
    assertEquals(expectedSuccesses, successCounter.get());
    assertEquals(expectedFailures, failureCounter.get());
}
/**
 * Builds a job report from the given batcher by locating QueryJobReportListener
 * instances among its registered success and failure listeners.
 *
 * NOTE(review): this block is truncated in the extracted view — the assignments
 * that populate queryJobSuccessListener/queryJobFailureListener and the closing
 * braces are not visible here, so the apparent dereference of a still-null
 * queryJobSuccessListener below may be resolved in the missing portion.
 * Verify against the full source.
 */
public JobReportImpl(QueryBatcher batcher) {
    QueryJobReportListener queryJobSuccessListener = null;
    QueryJobReportListener queryJobFailureListener = null;
    // Scan the success listeners for a report listener.
    QueryBatchListener[] batchListeners = batcher.getQuerySuccessListeners();
    for(QueryBatchListener batchListener : batchListeners) {
        if(batchListener instanceof QueryJobReportListener) {
            // Scan the failure listeners for a report listener.
            QueryFailureListener[] failureListeners = batcher.getQueryFailureListeners();
            for(QueryFailureListener failureListener : failureListeners) {
                if(failureListener instanceof QueryJobReportListener) {
                    // 'failureBatchesCount' is not declared in the visible span — TODO confirm.
                    failureEventsCount = failureBatchesCount;
                    successEventsCount = queryJobSuccessListener.getSuccessEventsCount();
                    // Snapshot the job state and timing into the report fields.
                    isJobComplete = batcher.isStopped();
                    reportTimestamp = Calendar.getInstance();
                    jobStartTime = batcher.getJobStartTime();
                    jobEndTime = batcher.getJobEndTime();
/**
 * Registers and starts a job for the given QueryBatcher.
 *
 * <p>A job id is generated when the batcher does not already carry one. A batcher
 * that has never been started may not reuse an id that is still present in the
 * active-jobs map.
 *
 * @param batcher the batcher to start; must be a QueryBatcherImpl
 * @param activeJobs map of currently active jobs, keyed by job id
 * @return the ticket registered for this job
 * @throws DataMovementException if an unstarted batcher's job id is already active
 */
public JobTicket startJob(QueryBatcher batcher, ConcurrentHashMap<String, JobTicket> activeJobs) {
    String jobId = batcher.getJobId();
    if (jobId == null) {
        // No caller-assigned id; generate one and record it on the batcher.
        jobId = generateJobId();
        batcher.withJobId(jobId);
    }
    // NOTE(review): containsKey-then-put is not atomic; two concurrent starts with
    // the same id could both pass this check — consider putIfAbsent if that matters.
    if (!batcher.isStarted() && activeJobs.containsKey(jobId)) {
        throw new DataMovementException(
            "Cannot start the batcher because the given job Id already exists in the active jobs", null);
    }
    QueryBatcherImpl batcherImpl = (QueryBatcherImpl) batcher;
    JobTicket jobTicket = new JobTicketImpl(jobId, JobTicket.JobType.QUERY_BATCHER)
        .withQueryBatcher(batcherImpl);
    batcherImpl.start(jobTicket);
    activeJobs.put(jobId, jobTicket);
    return jobTicket;
}
/**
 * Can be overridden by the subclass to prepare the QueryBatcher before the job is
 * started: applies the base preparation, the consistent-snapshot option, and any
 * configured success/failure listeners.
 *
 * @param queryBatcher the batcher about to be started
 */
protected void prepareQueryBatcher(QueryBatcher queryBatcher) {
    super.prepareBatcher(queryBatcher);
    if (consistentSnapshot) {
        // Pin the query to a single server timestamp for the life of the job.
        queryBatcher.withConsistentSnapshot();
    }
    if (urisReadyListeners != null) {
        for (QueryBatchListener urisReadyListener : urisReadyListeners) {
            queryBatcher.onUrisReady(urisReadyListener);
        }
    }
    if (queryFailureListeners != null) {
        for (QueryFailureListener failureListener : queryFailureListeners) {
            queryBatcher.onQueryFailure(failureListener);
        }
    }
}
// NOTE(review): extraction artifact — the onUrisReady/onQueryFailure lambda bodies
// below are truncated (their closing braces are missing in this view); tokens are
// preserved exactly as-is. Verify against the full source.
final StringBuffer failures = new StringBuffer();
queryBatcher
    .withBatchSize(batchSize)
    .withThreadCount(threadCount)
    .onUrisReady( batch -> { successfulBatchCount.incrementAndGet();
    .onQueryFailure( throwable -> { failureBatchCount.incrementAndGet();
    .withJobId(queryBatcherJobId)
    .withJobName(queryBatcherJobName);
// Configuration must be readable back before the job starts.
assertEquals(batchSize, queryBatcher.getBatchSize());
assertEquals(threadCount, queryBatcher.getThreadCount());
assertEquals(queryBatcherJobId, queryBatcher.getJobId());
assertFalse("Job should not be stopped yet", queryBatcher.isStopped());
assertFalse("Job should not be started yet", queryBatcher.isStarted());
moveMgr.startJob(queryBatcher);
long reportStartTime = new Date().getTime();
// After start, the ticket must be retrievable by job id and reflect the job name.
JobTicket ticket = moveMgr.getActiveJob(queryBatcherJobId);
assertTrue("Job should be started now", queryBatcher.isStarted());
assertEquals(queryBatcherJobName, ticket.getBatcher().getJobName());
boolean finished = queryBatcher.awaitCompletion();
// NOTE(review): truncated span — not all braces for the branches and the enclosing
// method are visible, and 'existingListeners' is redeclared below, which implies
// the first else-block closes in a missing portion. Tokens preserved exactly as-is.
queryBatcher.withConsistentSnapshot();
QueryBatchListener[] existingListeners = queryBatcher.getQuerySuccessListeners();
if (existingListeners == null || existingListeners.length == 0) {
    // No success listeners yet: install the configured ones wholesale.
    queryBatcher.setUrisReadyListeners(urisReadyListeners.toArray(new QueryBatchListener[]{}));
} else {
    // NOTE(review): newListeners holds only 'listener', so this call replaces ALL
    // previously registered success listeners with a single one — confirm that
    // dropping the existing listeners is intended.
    List<QueryBatchListener> newListeners = new ArrayList<>();
    newListeners.add(listener);
    queryBatcher.setUrisReadyListeners(newListeners.toArray(new QueryBatchListener[]{}));
    queryBatcher.onUrisReady(urisReadyListener);
    // Same install-or-replace pattern for the failure listeners.
    QueryFailureListener[] existingListeners = queryBatcher.getQueryFailureListeners();
    if (existingListeners == null || existingListeners.length == 0) {
        queryBatcher.setQueryFailureListeners(queryFailureListeners.toArray(new QueryFailureListener[]{}));
    } else {
        List<QueryFailureListener> newListeners = new ArrayList<>();
        newListeners.add(listener);
        queryBatcher.setQueryFailureListeners(newListeners.toArray(new QueryFailureListener[]{}));
        queryBatcher.awaitCompletion();
private void testListenerException(QueryBatchListener listener) { final AtomicInteger failureBatchCount = new AtomicInteger(); Iterator<String> iterator = Arrays.asList(new String[] {uri1}).iterator(); QueryBatcher queryBatcher = moveMgr.newQueryBatcher(iterator) .onUrisReady( batch -> logger.debug("uri={}", batch.getItems()[0]) ) .onUrisReady(listener) .onQueryFailure( queryThrowable -> failureBatchCount.incrementAndGet() ); moveMgr.startJob(queryBatcher); queryBatcher.awaitCompletion(); moveMgr.stopJob(queryBatcher); // there should be no failure sent to the QueryBatcher onQueryFailure listeners assertEquals(0, failureBatchCount.get()); }
// Export pass 1: select documents whose JSON property "jobId" matches jobIds and
// stream each matched document into the zip via the ExportListener.
batcher = dmm.newQueryBatcher(sqb.value(sqb.jsonProperty("jobId"), jobIds));
batcher.onUrisReady(new ExportListener().onDocumentReady(zipConsumer));
JobTicket jobTicket = dmm.startJob(batcher);
batcher.awaitCompletion();
dmm.stopJob(batcher);
dmm.release();
// Export pass 2: same flow, but matching on the XML element <jobId> instead of
// the JSON property, so both document formats are covered.
batcher = dmm.newQueryBatcher(sqb.value(sqb.element(new QName("jobId")), jobIds));
batcher.onUrisReady(new ExportListener().onDocumentReady(zipConsumer));
jobTicket = dmm.startJob(batcher);
batcher.awaitCompletion();
dmm.stopJob(batcher);
dmm.release();
/**
 * Verifies exception handling for iterator-based batchers: listeners that throw
 * either an Error (InternalError) or an unchecked RuntimeException must not derail
 * the job, and the success/failure counts must match expectations.
 *
 * @param uris the URIs to feed the batcher
 * @param expectedSuccesses expected number of successful batches
 * @param expectedFailures expected number of failed batches
 */
public void testIteratorExceptions(List<String> uris, int expectedSuccesses, int expectedFailures) {
    // First pass: both listeners throw an Error subclass.
    QueryBatcher uriListBatcher = moveMgr.newQueryBatcher(uris.iterator())
        .onUrisReady(batch -> { throw new InternalError(errorMessage); })
        .onQueryFailure(failure -> { throw new InternalError(errorMessage); });
    testExceptions(uriListBatcher, expectedSuccesses, expectedFailures);

    // Second pass: both listeners throw an unchecked exception.
    uriListBatcher = moveMgr.newQueryBatcher(uris.iterator())
        .onUrisReady(batch -> { throw new RuntimeException(errorMessage); })
        .onQueryFailure(failure -> { throw new RuntimeException(errorMessage); });
    testExceptions(uriListBatcher, expectedSuccesses, expectedFailures);
}
// NOTE(review): truncated span — several onUrisReady/onJobCompletion lambda bodies
// are cut off mid-statement and 'queryBatcher3' is assembled outside the visible
// lines. Tokens preserved exactly as-is; verify against the full source.
query.setCollections(qhbTestCollection);
QueryBatcher queryBatcher = moveMgr.newQueryBatcher(query)
    .onUrisReady(batch -> { try { Thread.sleep(1000);
    .onJobCompletion(batcher -> { try { Thread.sleep(2000); });
moveMgr.startJob(queryBatcher);
queryBatcher.awaitCompletion();
moveMgr.stopJob(queryBatcher);
// The completion listener must have fired by the time stopJob returns.
assertTrue("onJobCompletionListener is not called", jobCompletionFlag.get());
jobCompletionFlag.set(false);
// Second batcher: completion listener asserts ordering relative to onUrisReady.
QueryBatcher queryBatcher2 = moveMgr.newQueryBatcher(query)
    .onUrisReady(batch -> { try { Thread.sleep(1000);
    .onJobCompletion(batcher -> { assertTrue("UrisReady listener is not completed yet", urisReadyFlag.get()); jobCompletionFlag.set(true);
    .onJobCompletion(batcher -> jobCompletionFlag.set(true));
moveMgr.startJob(queryBatcher3);
queryBatcher3.awaitCompletion();
/**
 * Runs the job via the superclass and, once the batcher has stopped, writes the
 * optional file footer and closes the underlying writer.
 *
 * @param databaseClient client used to execute the job
 * @return the ticket describing the job run
 * @throws RuntimeException wrapping any IOException from writing or closing
 */
@Override
public QueryBatcherJobTicket run(DatabaseClient databaseClient) {
    QueryBatcherJobTicket ticket = super.run(databaseClient);
    boolean jobStopped = ticket.getQueryBatcher().isStopped();
    if (jobStopped) {
        try {
            if (fileFooter != null) {
                fileWriter.write(fileFooter);
            }
        } catch (IOException ie) {
            throw new RuntimeException(ie);
        } finally {
            // Close even when writing the footer failed.
            closeFileWriter();
        }
    }
    return ticket;
}

// Closes the writer, converting any IOException into an unchecked exception.
private void closeFileWriter() {
    try {
        this.fileWriter.close();
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
/**
 * Builds, prepares, and starts a QueryBatcher job against the given client,
 * optionally awaiting completion and stopping the job afterwards.
 *
 * @param databaseClient client to run the job against
 * @return a ticket bundling the manager, batcher, and job ticket
 */
@Override
public QueryBatcherJobTicket run(DatabaseClient databaseClient) {
    // Reuse the injected manager when one was provided; otherwise create one per run.
    DataMovementManager dmm = this.dataMovementManager != null
        ? this.dataMovementManager
        : databaseClient.newDataMovementManager();

    String jobDescription = getJobDescription();
    if (jobDescription != null && logger.isInfoEnabled()) {
        logger.info(jobDescription);
    }

    QueryBatcher queryBatcher = newQueryBatcherBuilder().buildQueryBatcher(databaseClient, dmm);
    prepareQueryBatcher(queryBatcher);
    JobTicket jobTicket = dmm.startJob(queryBatcher);

    if (awaitCompletion) {
        queryBatcher.awaitCompletion();
        if (stopJobAfterCompletion) {
            dmm.stopJob(queryBatcher);
        }
        if (jobDescription != null && logger.isInfoEnabled()) {
            logger.info("Completed: " + jobDescription);
        }
    }
    return new QueryBatcherJobTicket(dmm, queryBatcher, jobTicket);
}
// Locate any NoResponseListener instances among the query batcher's registered
// failure listeners (presumably to reconfigure or remove them — the loop body is
// outside the visible span; verify against the full source).
QueryFailureListener[] queryFailureListeners = ((QueryBatcher) batcher).getQueryFailureListeners();
for (QueryFailureListener queryFailureListener : queryFailureListeners) {
    if ( queryFailureListener instanceof NoResponseListener ) {
@Override public void retryListener(QueryBatch batch, QueryBatchListener queryBatchListener) { // We get the batch and modify the client alone in order to make use // of the new forest client in case if the original host is unavailable. DatabaseClient client = null; Forest[] forests = batch.getBatcher().getForestConfig().listForests(); for(Forest forest : forests) { if(forest.equals(batch.getForest())) client = getMoveMgr().getForestClient(forest); } QueryBatchImpl retryBatch = new QueryBatchImpl() .withClient( client ) .withBatcher( batch.getBatcher() ) .withTimestamp( batch.getTimestamp() ) .withServerTimestamp( batch.getServerTimestamp() ) .withItems( batch.getItems() ) .withJobTicket( batch.getJobTicket() ) .withJobBatchNumber( batch.getJobBatchNumber() ) .withJobResultsSoFar( batch.getJobResultsSoFar() ) .withForestBatchNumber( batch.getForestBatchNumber() ) .withForestResultsSoFar( batch.getForestResultsSoFar() ) .withForest( batch.getForest() ) .withJobTicket( batch.getJobTicket() ); queryBatchListener.processEvent(retryBatch); }
query.setCollections(qhbTestCollection);
QueryBatcher queryBatcher = moveMgr.newQueryBatcher(query)
    .onUrisReady(new CloseBatchListener())
    .onQueryFailure(new CloseFailureListener());
// FIX: the job was never started before awaiting completion, so the listeners'
// close() hooks could never fire; start the job as the sibling tests do.
moveMgr.startJob(queryBatcher);
queryBatcher.awaitCompletion();
moveMgr.stopJob(queryBatcher);
// stopJob must have invoked close() on the registered QueryBatchListener.
assertTrue("Close method is not called on QueryBatchListener", calledBatchListener.get());
@Test public void testResultReplace() throws Exception { DocumentMetadataHandle meta = new DocumentMetadataHandle().withCollections(collection); // write the document client.newDocumentManager().writeAs(collection + "/test1.json", meta, "{ \"testProperty\": \"test1\" }"); StructuredQueryDefinition query = sqb.value(sqb.jsonProperty("testProperty"), "test1"); ServerTransform transform = new ServerTransform(transformName1) .addParameter("newValue", "test1a"); ApplyTransformListener listener = new ApplyTransformListener() .withTransform(transform) .withApplyResult(ApplyResult.REPLACE); QueryBatcher batcher = moveMgr.newQueryBatcher(query) .onUrisReady(listener); JobTicket ticket = moveMgr.startJob( batcher ); batcher.awaitCompletion(); moveMgr.stopJob(ticket); JsonNode docContents = docMgr.readAs(collection + "/test1.json", JsonNode.class); assertEquals( "the transform should have changed testProperty to 'test1a'", "test1a", docContents.get("testProperty").textValue() ); }
public List<String> testQueryExceptions(QueryDefinition query, int expectedSuccesses, int expectedFailures) { QueryBatcher queryBatcher = newQueryBatcher(query) .onUrisReady( batch -> { throw new InternalError(errorMessage); } ) .onQueryFailure( queryThrowable -> { throw new InternalError(errorMessage); } ); testExceptions(queryBatcher, expectedSuccesses, expectedFailures); // collect the uris this time List<String> matchingUris = Collections.synchronizedList(new ArrayList<>()); queryBatcher = newQueryBatcher(query) .onUrisReady( batch -> matchingUris.addAll(Arrays.asList(batch.getItems())) ) .onUrisReady( batch -> { throw new RuntimeException(errorMessage); } ) .onQueryFailure( queryThrowable -> { throw new RuntimeException(errorMessage); } ); testExceptions(queryBatcher, expectedSuccesses, expectedFailures); return matchingUris; }
// Locate any HostAvailabilityListener instances among the query batcher's
// registered failure listeners (presumably to reconfigure or remove them — the
// loop body is outside the visible span; verify against the full source).
QueryFailureListener[] queryFailureListeners = ((QueryBatcher) batcher).getQueryFailureListeners();
for(QueryFailureListener queryFailureListener : queryFailureListeners) {
    if ( queryFailureListener instanceof HostAvailabilityListener ) {
// NOTE(review): truncated span — the receiver of this builder chain and both
// lambda bodies are cut off in this view; tokens preserved exactly as-is.
.withBatchSize(batchSize)
.withThreadCount(threadCount)
.withJobId(jobId)
// Per-batch processing: builds a FlowResource for each URI batch (body truncated).
.onUrisReady((QueryBatch batch) -> { try { FlowResource flowResource;
// Failure accounting: count the failed batch and its estimated event count.
.onQueryFailure((QueryBatchException failure) -> { failedBatches.addAndGet(1); failedEvents.addAndGet(batchSize);
queryBatcher.awaitCompletion();
@Test public void testResultIgnore() throws Exception { DocumentMetadataHandle meta = new DocumentMetadataHandle().withCollections(collection); // write the document client.newDocumentManager().writeAs(collection + "/test2.json", meta, "{ \"testProperty\": \"test2\" }"); StructuredQueryDefinition query = sqb.value(sqb.jsonProperty("testProperty"), "test2"); ServerTransform transform = new ServerTransform(transformName2) .addParameter("newValue", "test2a"); ApplyTransformListener listener = new ApplyTransformListener() .withTransform(transform) .withApplyResult(ApplyResult.IGNORE); QueryBatcher batcher = moveMgr.newQueryBatcher(query) .onUrisReady(listener); JobTicket ticket = moveMgr.startJob( batcher ); batcher.awaitCompletion(); moveMgr.stopJob(ticket); JsonNode docContents = docMgr.readAs(collection + "/test2.json", JsonNode.class); assertEquals( "the transform should have changed testProperty to 'test2a'", "test2a", docContents.get("testProperty").textValue() ); }