@Override
public void run() {
  try {
    log.info("Currently warming up for [" + config.getInt(QNodeProperties.WARMING_TIME)
        + "] - certain actions will only be taken afterwards.");
    // WARMING_TIME is in seconds; multiply as a long to avoid int overflow for large values.
    Thread.sleep(config.getInt(QNodeProperties.WARMING_TIME) * 1000L);
    log.info("Warming time ended [OK] Now the QNode will operate fully normally.");
  } catch (InterruptedException e) {
    // Restore the interrupt flag so the owning thread/executor can observe the interruption,
    // and log the exception instead of swallowing it.
    Thread.currentThread().interrupt();
    log.error("Warming time interrupted - ", e);
  }
  // Whether warming completed or was interrupted, the QNode stops being "warming".
  context.getIsWarming().set(false);
}
};
@Override
public void retryLogic() {
  // Bind failed on the current port: bump it by one and let the caller retry.
  int nextPort = testConfig.getInt(DNodeProperties.PORT) + 1;
  testConfig.setProperty(DNodeProperties.PORT, nextPort);
}
};
/**
 * Returns the address (host:port) of this DNode.
 */
public String whoAmI() {
  String host = config.getString(DNodeProperties.HOST);
  int port = config.getInt(DNodeProperties.PORT);
  return host + ":" + port;
}
/**
 * Returns the address (host:port) of this DNode.
 */
public String getAddress() {
  String host = config.getString(DNodeProperties.HOST);
  int port = config.getInt(DNodeProperties.PORT);
  return host + ":" + port;
}
@Override
public void run() {
  // Guard: init() must have been called before starting to serve.
  if (!isInit.get()) {
    throw new IllegalStateException("HTTP server must be init with init() method.");
  }
  server.start();
  int httpPort = config.getInt(HttpFileExchangerProperties.HTTP_PORT);
  log.info("HTTP File exchanger LISTENING on port: " + httpPort);
  isListening.set(true);
}
@Override
public void retryLogic() {
  // Bind failed on the current port: bump it by one and let the caller retry.
  int nextPort = testConfig.getInt(QNodeProperties.PORT) + 1;
  testConfig.setProperty(QNodeProperties.PORT, nextPort);
}
};
/**
 * Returns the base HTTP URL (http://host:port) of this node's file exchanger.
 */
public String address() {
  String host = config.getString(DNodeProperties.HOST);
  int httpPort = config.getInt(HttpFileExchangerProperties.HTTP_PORT);
  return "http://" + host + ":" + httpPort;
}
/**
 * Returns the base HTTP URL (http://host:port) of this node's file exchanger.
 */
public String httpExchangerAddress() {
  String host = config.getString(DNodeProperties.HOST);
  int httpPort = config.getInt(HttpFileExchangerProperties.HTTP_PORT);
  return "http://" + host + ":" + httpPort;
}
/**
 * Returns the address (host:port) of this DNode's TCP streaming API.
 */
public String getTCPAPIAddress() {
  String host = config.getString(DNodeProperties.HOST);
  int streamingPort = config.getInt(DNodeProperties.STREAMING_PORT);
  return host + ":" + streamingPort;
}
/**
 * Registers the dnode in the cluster. This gives green light to use it.
 * (The method name keeps the interface's historical "Ligth" spelling - it is an
 * {@code @Override} and cannot be renamed here.)
 */
@Override
public void giveGreenLigth() {
  // Tuning values for the distributed registry, with defaults 5 and 3.
  // NOTE(review): exact semantics inferred from property names - confirm against
  // DistributedRegistry's constructor documentation.
  int minutesToCheckRegister = config.getInt(HazelcastProperties.MAX_TIME_TO_CHECK_REGISTRATION, 5);
  int oldestMembersLeading = config.getInt(HazelcastProperties.OLDEST_MEMBERS_LEADING_COUNT, 3);
  dnodesRegistry = new DistributedRegistry(CoordinationStructures.DNODES, new DNodeInfo(config), hz,
      minutesToCheckRegister, oldestMembersLeading);
  dnodesRegistry.register();
}
/**
 * Creates a Fetcher configured from the given SploutConfiguration: temp dir,
 * optional S3 credentials, download buffer / throttle sizes, progress-report
 * granularity and the Hadoop filesystem name.
 */
public Fetcher(SploutConfiguration config) {
  tempDir = new File(config.getString(FetcherProperties.TEMP_DIR));
  // S3 credentials are optional - null when data is not fetched from S3.
  accessKey = config.getString(FetcherProperties.S3_ACCESS_KEY, null);
  secretKey = config.getString(FetcherProperties.S3_SECRET_KEY, null);
  downloadBufferSize = config.getInt(FetcherProperties.DOWNLOAD_BUFFER);
  bytesPerSecThrottle = config.getInt(FetcherProperties.BYTES_PER_SEC_THROTTLE);
  bytesToReportProgress = config.getLong(FetcherProperties.BYTES_TO_REPORT_PROGRESS);
  String fsName = config.getString(FetcherProperties.HADOOP_FS_NAME);
  hadoopConf = new Configuration();
  // Only override the Hadoop default FS when explicitly configured.
  if (fsName != null) {
    hadoopConf.set("fs.default.name", fsName);
  }
  log.info("Created " + Fetcher.class + " with tempDir = " + tempDir);
  // A throttle value <= 0 disables throttling entirely.
  if (bytesPerSecThrottle > 0) {
    log.info("Throttling at: " + bytesPerSecThrottle + " bytes per sec.");
  } else {
    log.warn("No throttling. Fetched data will be transferred at full speed. This may affect query servicing.");
  }
}
// Connect to the DNode's TCP streaming API on localhost (port taken from
// DNodeProperties.STREAMING_PORT).
// NOTE(review): the socket's lifecycle is managed by surrounding code not visible
// here - confirm it is closed (ideally via try-with-resources) after use.
Socket clientSocket = new Socket("localhost", config.getInt(DNodeProperties.STREAMING_PORT));
public void run() { Future<?> future = deployExecutor.submit(newDeployRunnable(deployActions, version)); deploysBeingExecuted.put(version, future); try { // This line makes the wait thread wait for the deploy as long as // the configuration tells // If the timeout passes a TimeoutException is thrown future.get(config.getInt(DNodeProperties.DEPLOY_TIMEOUT_SECONDS), TimeUnit.SECONDS); } catch (CancellationException e) { log.info("Cancelation when waiting for local deploy to finish - killing deployment " + "version[" + version + "]"); markDeployAsAborted(version, ExceptionUtils.getStackTrace(e)); } catch (InterruptedException e) { log.info("Interrupted exception waiting for local deploy to finish - killing deployment" + "version[" + version + "]"); markDeployAsAborted(version, ExceptionUtils.getStackTrace(e)); } catch (ExecutionException e) { log.warn("Execution exception waiting for local deploy to finish - killing deployment." + "version[" + version + "]", e); markDeployAsAborted(version, ExceptionUtils.getStackTrace(e)); } catch (TimeoutException e) { log.info("Timeout waiting for local deploy to finish - killing deployment." + "version[" + version + "]", e); markDeployAsAborted(version, "Timeout reached - " + config.getInt(DNodeProperties.DEPLOY_TIMEOUT_SECONDS) + " seconds"); lastDeployTimedout.set(true); } finally { // If the future didn't end, we just send an interrupt signal to it. future.cancel(true); deploysBeingExecuted.remove(version); } } };
/**
 * Binds the TCP streaming server to the configured streaming port, serves it on
 * a background thread, and blocks until the server reports it is serving.
 *
 * @throws InterruptedException if interrupted while waiting for the server to come up
 * @throws IOException if binding fails
 */
public void start(SploutConfiguration config, DNodeHandler dNode) throws InterruptedException, IOException {
  this.dNode = dNode;
  this.tcpPort = config.getInt(DNodeProperties.STREAMING_PORT);
  server = new TCPServer();
  server.bind();
  // serve() runs on its own thread - presumably it blocks until shutdown; TODO confirm.
  Thread t = new Thread() {
    @Override
    public void run() {
      server.serve();
    }
  };
  t.start();
  // Poll every 100 ms until the server is actually accepting connections,
  // so callers can rely on the server being up when start() returns.
  while (!server.isServing()) {
    Thread.sleep(100);
  }
}
// NOTE(review): this snippet appears truncated/garbled by extraction - the
// catch (BindException e) body is cut off mid-expression (a stray ".getInt(...)"
// follows it) and the closing "} while (...)" of the do-loop is missing.
// Left byte-identical; recover the original from version control before editing.
// Intent (as far as visible): loop trying to create the HttpServer on the configured
// host/port with the configured backlog, retrying on BindException, then set up the
// server/client thread pools and mark the exchanger as initialized.
do { try { httpPort = config.getInt(HttpFileExchangerProperties.HTTP_PORT); server = HttpServer.create(new InetSocketAddress(config.getString(DNodeProperties.HOST), httpPort), config.getInt(HttpFileExchangerProperties.HTTP_BACKLOG)); bind = true; } catch (BindException e) { .getInt(HttpFileExchangerProperties.HTTP_THREADS_SERVER)); clientExecutors = Executors.newFixedThreadPool(config .getInt(HttpFileExchangerProperties.HTTP_THREADS_CLIENT)); server.setExecutor(serverExecutors); isInit.set(true);
/**
 * Creates the QNode handler context: stores the configuration and coordination
 * structures, reads the DNode thrift-client pool sizing/timeout, creates the
 * replica balancer and initializes metrics.
 */
public QNodeHandlerContext(SploutConfiguration config, CoordinationStructures coordinationStructures) {
  this.config = config;
  this.coordinationStructures = coordinationStructures;
  // Sizing for the pool of thrift clients to DNodes, and how long a take() may block.
  this.thriftClientPoolSize = config.getInt(QNodeProperties.DNODE_POOL_SIZE);
  this.dnodePoolTimeoutMillis = config.getLong(QNodeProperties.QNODE_DNODE_POOL_TAKE_TIMEOUT);
  this.replicaBalancer = new ReplicaBalancer(this);
  initMetrics();
}
protected static void configureTCP(SploutConfiguration buConf, JoinConfig join) throws HazelcastConfigBuilderException { log.info("Configuring SploutSQL for TCP/IP Hazelcast join."); join.getTcpIpConfig().setEnabled(true); join.getTcpIpConfig().setConnectionTimeoutSeconds( buConf.getInt(HazelcastProperties.TCP_CONNECTION_TIMEOUT_SECONDS, 20)); String[] tcpCluster = buConf.getStringArray(HazelcastProperties.TCP_CLUSTER); if (tcpCluster.length == 0) { throw new HazelcastConfigBuilderException("Enabled TCP join method but missing TCP Cluster key property (" + HazelcastProperties.TCP_CLUSTER + ")"); } try { // Comma separated hosts accepted. ArrayList<String> members = new ArrayList<String>(); members.add(Joiner.on(",").join(tcpCluster)); join.getTcpIpConfig().setMembers(members); } catch (Throwable e) { log.error("Invalid host in TCP cluster", e); throw new HazelcastConfigBuilderException("Invalid host in TCP cluster: " + tcpCluster); } String requiredMember = buConf.getString(HazelcastProperties.TCP_CLUSTER_REQUIRED_MEMBER); if (requiredMember != null) { try { join.getTcpIpConfig().setRequiredMember(requiredMember); } catch (Throwable e) { log.error("Invalid required host in TCP cluster", e); throw new HazelcastConfigBuilderException("Invalid required host in TCP cluster: " + requiredMember); } } }
public void runForever(int nDNodes) throws Exception { SploutConfiguration config = SploutConfiguration.getTestConfig(); for (int i = 0; i < nDNodes; i++) { config = SploutConfiguration.getTestConfig(); // we need to change some props for avoiding conflicts, ports, etc config.setProperty(DNodeProperties.PORT, config.getInt(DNodeProperties.PORT) + i); config.setProperty(DNodeProperties.DATA_FOLDER, config.getString(DNodeProperties.DATA_FOLDER) + "-" + i); config.setProperty(FetcherProperties.TEMP_DIR, config.getString(FetcherProperties.TEMP_DIR) + "-" + i); DNode dnode = new DNode(config, new DNodeHandler()); dnode.init(); } }
/**
 * Starts nDNodes DNodes - each with its own port, data folder and fetcher temp
 * dir to avoid conflicts - plus one QNode, and leaves them running.
 *
 * @param nDNodes number of DNodes to start
 */
public void runForever(int nDNodes) throws Exception {
  SploutConfiguration config = SploutConfiguration.get();
  for (int i = 0; i < nDNodes; i++) {
    config = SploutConfiguration.get();
    // we need to change some props for avoiding conflicts, ports, etc
    config.setProperty(DNodeProperties.PORT, config.getInt(DNodeProperties.PORT) + i);
    config.setProperty(DNodeProperties.DATA_FOLDER, config.getString(DNodeProperties.DATA_FOLDER) + "-" + i);
    config.setProperty(FetcherProperties.TEMP_DIR, config.getString(FetcherProperties.TEMP_DIR) + "-" + i);
    DNode dnode = new DNode(config, new DNodeHandler());
    dnode.init();
  }
  // NOTE(review): the QNode is started with the LAST DNode's config (DNode port and
  // folders offset by nDNodes-1), or the pristine config when nDNodes == 0 - this is
  // probably harmless since the QNode reads QNode properties, but confirm it is intended.
  QNode qnode = new QNode();
  qnode.start(config, new QNodeHandler());
}