@Override public void release() { for ( DatabaseClient client : clientMap.values() ) { try { // don't release the primaryClient because we didn't create it, it was provided to us if ( primaryClient != client ) client.release(); } catch (Throwable t) { logger.error("Failed to release client for host \"" + client.getHost() + "\"", t); } } }
// Constructs the manager around a caller-supplied client: it becomes the
// primary client and is indexed by host in clientMap. Because the caller owns
// this client, release() deliberately skips it during cleanup.
public DataMovementManagerImpl(DatabaseClient client) { setPrimaryClient(client); clientMap.put(primaryClient.getHost(), primaryClient); }
private synchronized void calucluateDeltas(Set<Forest> oldForests, Forest[] forests) { // the forests we haven't known about yet Set<Forest> addedForests = new HashSet<>(); // the forests that we knew about but they were black-listed and are no longer black-listed Set<Forest> restartedForests = new HashSet<>(); // any known forest might now be black-listed Set<Forest> blackListedForests = new HashSet<>(oldForests); for ( Forest forest : forests ) { if ( ! oldForests.contains(forest) ) { // we need to do special handling since we're adding this new forest after we're started addedForests.add(forest); } // if we have blackListedTasks for this forest, let's restart them if ( blackListedTasks.get(forest) != null ) restartedForests.add(forest); // this forest is not black-listed blackListedForests.remove(forest); } if ( blackListedForests.size() > 0 ) { DataMovementManagerImpl moveMgrImpl = getMoveMgr(); String primaryHost = moveMgrImpl.getPrimaryClient().getHost(); if ( getHostNames(blackListedForests).contains(primaryHost) ) { int randomPos = Math.abs(primaryHost.hashCode()) % clientList.get().size(); moveMgrImpl.setPrimaryClient(clientList.get().get(randomPos)); } } cleanupExistingTasks(addedForests, restartedForests, blackListedForests); }
"%s://%s:%d%s?job-id=%s&entity-name=%s&flow-name=%s&database=%s", client.getSecurityContext().getSSLContext() != null ? "https" : "http", client.getHost(), client.getPort(), "/v1/internal/hubcollector",
@Override
public void processFailure(QueryBatch batch, Throwable throwable) {
  // processException decides whether this is a host-unavailable condition
  // (and, per its name, handles the failing host); only then do we retry
  // this batch's listener.
  boolean isHostUnavailableException =
      processException(batch.getBatcher(), throwable, batch.getClient().getHost());
  // idiom fix: dropped redundant "== true" comparison
  if (isHostUnavailableException) {
    try {
      logger.warn("Retrying failed listener batch: {}, results so far: {}, uris: {}",
          batch.getJobBatchNumber(), batch.getJobResultsSoFar(), Arrays.toString(batch.getItems()));
      batch.getBatcher().retryListener(batch, queryBatchListener);
    } catch (RuntimeException e) {
      // the retry itself failed: log and route back through failure handling
      logger.error("Exception during listener retry", e);
      processFailure(batch, e);
    }
  }
}
}
/**
 * This implements the WriteFailureListener interface. If the failure is a
 * host-unavailable condition (as determined by processException), the batch
 * is retried through the batcher's failure listeners.
 *
 * @param batch the batch of WriteEvents
 * @param throwable the exception
 */
public void processFailure(WriteBatch batch, Throwable throwable) {
  boolean isHostUnavailableException =
      processException(batch.getBatcher(), throwable, batch.getClient().getHost());
  // idiom fix: dropped redundant "== true" comparison
  if (isHostUnavailableException) {
    try {
      logger.warn("Retrying failed batch: {}, results so far: {}, uris: {}",
          batch.getJobBatchNumber(), batch.getJobWritesSoFar(),
          Stream.of(batch.getItems()).map(event -> event.getTargetUri()).collect(Collectors.toList()));
      batch.getBatcher().retryWithFailureListeners(batch);
    } catch (RuntimeException e) {
      // the retry itself failed: log and route back through failure handling
      logger.error("Exception during retry", e);
      processFailure(batch, e);
    }
  }
}
new AuthScope(client.getHost(), client.getPort(), AuthScope.ANY_REALM), new UsernamePasswordCredentials(username, password));
String primaryHost = moveMgrImpl.getPrimaryClient().getHost(); if ( removedHostInfos.containsKey(primaryHost) ) { int randomPos = Math.abs(primaryHost.hashCode()) % newHostInfos.length; if ( task instanceof BatchWriter ) { BatchWriter writerTask = (BatchWriter) task; if ( removedHostInfos.containsKey(writerTask.writeSet.getClient().getHost()) ) {
transactionInfo.written.set(true); logger.trace("begin write batch {} to forest on host \"{}\"", writeSet.getBatchNumber(), writeSet.getClient().getHost()); if ( writeSet.getTemporalCollection() == null ) { writeSet.getClient().newDocumentManager().write( logger.trace("failed batch sent to forest on host \"{}\"", writeSet.getClient().getHost()); Consumer<Throwable> onFailure = writeSet.getOnFailure(); if ( onFailure != null ) {
bean.setHost(databaseClient.getHost()); bean.setPort(databaseClient.getPort()); if (database != null) {
/**
 * This implements the QueryFailureListener interface. If the failure is a
 * host-unavailable condition (as determined by processException), the batch
 * is retried through the batcher's failure listeners.
 *
 * @param queryBatch the exception with information about the failed query attempt
 */
public void processFailure(QueryBatchException queryBatch) {
  boolean isHostUnavailableException =
      processException(queryBatch.getBatcher(), queryBatch, queryBatch.getClient().getHost());
  // idiom fix: dropped redundant "== true" comparison
  if (isHostUnavailableException) {
    try {
      logger.warn("Retrying failed batch: {}, results so far: {}, forest: {}, forestBatch: {}, forest results so far: {}",
          queryBatch.getJobBatchNumber(), queryBatch.getJobResultsSoFar(),
          queryBatch.getForest().getForestName(), queryBatch.getForestBatchNumber(),
          queryBatch.getForestResultsSoFar());
      queryBatch.getBatcher().retryWithFailureListeners(queryBatch);
    } catch (RuntimeException e) {
      // the retry itself failed: wrap and route back through failure handling
      logger.error("Exception during retry", e);
      processFailure(new QueryBatchException(queryBatch, e));
    }
  }
}
", items: " + batch.getItems().length + ", writes so far: " + batch.getJobWritesSoFar() + ", host: " + batch.getClient().getHost()); }) .onBatchFailure( (batch, throwable) -> { ", batch: " + batch.getJobBatchNumber() + ", writes so far: " + batch.getJobWritesSoFar() + ", host: " + batch.getClient().getHost() + ", uris: " + Stream.of(batch.getItems()).map(event->event.getTargetUri()).collect(Collectors.toList())); ", batch: " + batch.getJobBatchNumber() + ", writes so far: " + batch.getJobWritesSoFar() + ", host: " + batch.getClient().getHost() + ", uris: " + Stream.of(batch.getItems()).map(event->event.getTargetUri()).collect(Collectors.toList()));
// Build two clients against the final database as distinct users.
// NOTE(review): hard-coded users ("SydneyGardner", "GiannaEmerson") with
// placeholder password "x" look like test fixtures — confirm these credentials
// are never used outside a test environment.
clerkClient = DatabaseClientFactory.newClient(finalClient.getHost(), finalPort, HubConfig.DEFAULT_FINAL_NAME, "SydneyGardner", "x", Authentication.DIGEST);
officerClient = DatabaseClientFactory.newClient(finalClient.getHost(), finalPort, HubConfig.DEFAULT_FINAL_NAME, "GiannaEmerson", "x", Authentication.DIGEST);