@Override
public void run() {
    // Scheduled entry point: fan this server's queries out to its dedicated executor.
    log.debug("+++++ Started server job {}", server);
    try {
        final ThreadPoolExecutor executor = queryExecutorRepository.getExecutor(server);
        for (Query query : server.getQueries()) {
            ProcessQueryThread task = new ProcessQueryThread(resultProcessor, server, query);
            try {
                executor.submit(task);
            } catch (RejectedExecutionException ree) {
                // Executor queue is saturated; this query is skipped for the current run.
                log.error("Could not submit query {}. You could try to size the 'queryProcessorExecutor' to a larger size.", task, ree);
            }
        }
        log.debug("+++++ Finished server job {}", server);
    } catch (Exception e) {
        // Boundary catch: full stack trace only at debug level to keep production logs terse.
        if (log.isDebugEnabled()) {
            log.warn("+++++ Failed server job " + server, e);
        } else {
            log.warn("+++++ Failed server job {}: {} {}", server, e.getClass().getName(), e.getMessage());
        }
    }
}
private Server merge(Server firstServer, Server secondServer) { return Server.builder(firstServer) .addQueries(secondServer.getQueries()) .build(); } }
/**
 * Shuts down the output writers and clears the master server list.
 * Used both during shutdown and when re-reading config files.
 */
private void stopWriterAndClearMasterServerList() {
    for (Server server : this.masterServersList) {
        // Close the server-level writers first.
        for (OutputWriter writer : server.getOutputWriters()) {
            try {
                writer.close();
            } catch (LifecycleException ex) {
                // Fix: message typo ("Eror") and previously-dropped exception cause.
                log.error("Error stopping writer: {}", writer, ex);
            }
        }
        // Then close each query's writer instances.
        for (Query query : server.getQueries()) {
            for (OutputWriter writer : query.getOutputWriterInstances()) {
                try {
                    writer.close();
                    log.debug("Stopped writer: {} for query: {}", writer, query);
                } catch (LifecycleException ex) {
                    log.error("Error stopping writer: {} for query: {}", writer, query, ex);
                }
            }
        }
    }
    this.masterServersList = ImmutableList.of();
}
/** * Processes all the Servers into Job's * <p/> * Needs to be called after processFiles() */ private void processServersIntoJobs() throws LifecycleException { for (Server server : this.masterServersList) { try { // need to inject the poolMap for (Query query : server.getQueries()) { for (OutputWriter writer : query.getOutputWriterInstances()) { writer.start(); } } // Now validate the setup of each of the OutputWriter's per // query. this.validateSetup(server, server.getQueries()); // Now schedule the jobs for execution. this.serverScheduler.schedule(server); } catch (ValidationException ex) { throw new LifecycleException("Error validating json setup for query", ex); } } }
/**
 * Registers a server by folding its queries and output-writer factories
 * into the temporary singleton entry for that server.
 */
private void add(Server server) {
    TemporaryServer entry = singleton(server);
    entry.addQueries(server.getQueries());
    entry.addOutputWriters(server.getOutputWriterFactories());
}
/**
 * Submits one ProcessQueryThread per query on the given server to that
 * server's dedicated executor. Rejected submissions are logged and dropped.
 */
public void processServer(Server server) throws Exception {
    final ThreadPoolExecutor executor = queryExecutorRepository.getExecutor(server);
    for (Query query : server.getQueries()) {
        ProcessQueryThread task = new ProcessQueryThread(resultProcessor, server, query);
        try {
            executor.submit(task);
        } catch (RejectedExecutionException ree) {
            // Executor queue is saturated; this query is skipped for the current run.
            logger.error("Could not submit query {}. You could try to size the 'queryProcessorExecutor' to a larger size.", task, ree);
        }
    }
}
}
/** * Merges two lists of servers (and their queries). Based on the equality of * both sets of objects. Public for testing purposes. */ public static void mergeServerLists(List<Server> existing, List<Server> adding) { for (Server server : adding) { if (existing.contains(server)) { Server found = existing.get(existing.indexOf(server)); List<Query> queries = server.getQueries(); for (Query q : queries) { try { // no need to check for existing since this method // already does that found.addQuery(q); } catch (ValidationException ex) { // catching this exception because we don't want to stop // processing log.error("Error adding query: " + q + " to server" + server, ex); } } } else { existing.add(server); } } }
/**
 * Either invokes the queries multithreaded (max threads ==
 * server.getNumQueryThreads()) or invokes them one at a time on the
 * caller's thread.
 */
public static void processQueriesForServer(MBeanServerConnection mbeanServer, Server server) throws Exception {
    if (!server.isQueriesMultiThreaded()) {
        // Single-threaded path: run each query inline.
        for (Query query : server.getQueries()) {
            query.setServer(server);
            processQuery(mbeanServer, query);
        }
        return;
    }
    ExecutorService pool = null;
    try {
        pool = Executors.newFixedThreadPool(server.getNumQueryThreads());
        if (log.isDebugEnabled()) {
            log.debug("----- Creating " + server.getQueries().size() + " query threads");
        }
        List<Callable<Object>> tasks = new ArrayList<Callable<Object>>(server.getQueries().size());
        for (Query query : server.getQueries()) {
            query.setServer(server);
            tasks.add(Executors.callable(new ProcessQueryThread(mbeanServer, query)));
        }
        pool.invokeAll(tasks);
    } finally {
        // Always tear the pool down, even if invokeAll was interrupted.
        shutdownAndAwaitTermination(pool);
    }
}
private Server merge(Server firstServer, Server secondServer) { return Server.builder(firstServer) .addQueries(secondServer.getQueries()) .build(); } }
/**
 * Shuts down the output writers and clears the master server list.
 * Used both during shutdown and when re-reading config files.
 */
private void stopWriterAndClearMasterServerList() {
    for (Server server : this.masterServersList) {
        // Close the server-level writers first.
        for (OutputWriter writer : server.getOutputWriters()) {
            try {
                writer.close();
            } catch (LifecycleException ex) {
                // Fix: message typo ("Eror") and previously-dropped exception cause.
                log.error("Error stopping writer: {}", writer, ex);
            }
        }
        // Then close each query's writer instances.
        for (Query query : server.getQueries()) {
            for (OutputWriter writer : query.getOutputWriterInstances()) {
                try {
                    writer.close();
                    log.debug("Stopped writer: {} for query: {}", writer, query);
                } catch (LifecycleException ex) {
                    log.error("Error stopping writer: {} for query: {}", writer, query, ex);
                }
            }
        }
    }
    this.masterServersList = ImmutableList.of();
}
/**
 * Shuts down the output writers and clears the master server list.
 * Used both during shutdown and when re-reading config files.
 */
private void stopWriterAndClearMasterServerList() {
    for (Server server : this.masterServersList) {
        // Close the server-level writers first.
        for (OutputWriter writer : server.getOutputWriters()) {
            try {
                writer.close();
            } catch (LifecycleException ex) {
                // Fix: message typo ("Eror") and previously-dropped exception cause.
                log.error("Error stopping writer: {}", writer, ex);
            }
        }
        // Then close each query's writer instances.
        for (Query query : server.getQueries()) {
            for (OutputWriter writer : query.getOutputWriterInstances()) {
                try {
                    writer.close();
                    log.debug("Stopped writer: {} for query: {}", writer, query);
                } catch (LifecycleException ex) {
                    log.error("Error stopping writer: {} for query: {}", writer, query, ex);
                }
            }
        }
    }
    this.masterServersList = ImmutableList.of();
}
// NOTE(review): fragment — the enclosing method signature and the end of this
// loop lie outside the visible chunk, so only comments are added here.
// Apply the alias, then walk each query's writers (loop body continues below).
server.setAlias(serverAlias);
List<Query> queries = server.getQueries();
for (Query query : queries) {
    List<OutputWriter> writers = query.getOutputWriters();
/**
 * Registers a server by folding its queries and output-writer factories
 * into the temporary singleton entry for that server.
 */
private void add(Server server) {
    TemporaryServer entry = singleton(server);
    entry.addQueries(server.getQueries());
    entry.addOutputWriters(server.getOutputWriterFactories());
}
/** * Processes all the Servers into Job's * <p/> * Needs to be called after processFiles() */ private void processServersIntoJobs() throws LifecycleException { for (Server server : this.masterServersList) { try { // need to inject the poolMap for (Query query : server.getQueries()) { for (OutputWriter writer : query.getOutputWriterInstances()) { writer.start(); } } // Now validate the setup of each of the OutputWriter's per // query. this.validateSetup(server, server.getQueries()); // Now schedule the jobs for execution. this.scheduleJob(server); } catch (ParseException ex) { throw new LifecycleException("Error parsing cron expression: " + server.getCronExpression(), ex); } catch (SchedulerException ex) { throw new LifecycleException("Error scheduling job for server: " + server, ex); } catch (ValidationException ex) { throw new LifecycleException("Error validating json setup for query", ex); } } }
// NOTE(review): fragment — both loops are unclosed and the enclosing method is
// outside the visible chunk, so only comments are added here.
// Start every writer instance for every query, then validate the setup
// (validateSetup appears to run inside the writer loop — confirm intent
// against the complete method).
for (Query query : server.getQueries()) {
    for (OutputWriter writer : query.getOutputWriterInstances()) {
        writer.start();
        this.validateSetup(server, server.getQueries());
public void process() throws LifecycleException, ValidationException, SchedulerException, ParseException { for (Server server : this.masterServersList) { if (server.isLocal()) { server.setLocalMBeanServer(mbeanServer); server.setAlias(fabricService.getCurrentContainerName()); } // need to inject the poolMap for (Query query : server.getQueries()) { query.setServer(server); for (OutputWriter writer : query.getOutputWriters()) { writer.setObjectPoolMap(getObjectPoolMap()); writer.start(); } } // Now validate the setup of each of the OutputWriter's per query. validateSetup(server.getQueries()); // Now schedule the jobs for execution. scheduleJob(server); } }