/** * Gets only the default values and adds some desirable properties for testing, */ public static SploutConfiguration getTestConfig() { SploutConfiguration properties = new SploutConfiguration(); PropertiesConfiguration config = load("", SPLOUT_PROPERTIES + ".default", true); properties.addConfiguration(config); // Activate replica balancing for tests properties.setProperty(QNodeProperties.REPLICA_BALANCE_ENABLE, true); // Disable wait for testing speedup. properties.setProperty(HazelcastProperties.DISABLE_WAIT_WHEN_JOINING, true); // Disable warming up - set it to only one second // that's enough since Hazelcast joining is by far slower properties.setProperty(QNodeProperties.WARMING_TIME, 1); // Disable HZ state storage properties.clearProperty(HazelcastProperties.HZ_PERSISTENCE_FOLDER); // We maintain compatibility with legacy tests by using JSON serialization between QNode and DNode properties.setProperty(QNodeProperties.DISABLE_BINARY_PROTOCOL, true); // We don't want the tests to mess around with more ports, in general properties.setProperty(DNodeProperties.STREAMING_API_DISABLE, true); return properties; }
public void runForever(int nDNodes) throws Exception { SploutConfiguration config = SploutConfiguration.get(); for (int i = 0; i < nDNodes; i++) { config = SploutConfiguration.get(); // we need to change some props for avoiding conflicts, ports, etc config.setProperty(DNodeProperties.PORT, config.getInt(DNodeProperties.PORT) + i); config.setProperty(DNodeProperties.DATA_FOLDER, config.getString(DNodeProperties.DATA_FOLDER) + "-" + i); config.setProperty(FetcherProperties.TEMP_DIR, config.getString(FetcherProperties.TEMP_DIR) + "-" + i); DNode dnode = new DNode(config, new DNodeHandler()); dnode.init(); } QNode qnode = new QNode(); qnode.start(config, new QNodeHandler()); }
tempDir = new File(config.getString(FetcherProperties.TEMP_DIR)); int httpPort = 0; int trials = 0; do { try { httpPort = config.getInt(HttpFileExchangerProperties.HTTP_PORT); server = HttpServer.create(new InetSocketAddress(config.getString(DNodeProperties.HOST), httpPort), config.getInt(HttpFileExchangerProperties.HTTP_BACKLOG)); bind = true; } catch (BindException e) { if (config.getBoolean(HttpFileExchangerProperties.HTTP_PORT_AUTO_INCREMENT)) { config.setProperty(HttpFileExchangerProperties.HTTP_PORT, httpPort + 1); } else { throw e; .getInt(HttpFileExchangerProperties.HTTP_THREADS_SERVER)); clientExecutors = Executors.newFixedThreadPool(config .getInt(HttpFileExchangerProperties.HTTP_THREADS_CLIENT)); server.setExecutor(serverExecutors); isInit.set(true);
@Override
public void retryLogic() {
  // Bump the DNode port by one before the next attempt.
  int nextPort = testConfig.getInt(DNodeProperties.PORT) + 1;
  testConfig.setProperty(DNodeProperties.PORT, nextPort);
}
};
public static Config build(SploutConfiguration buConf) throws HazelcastConfigBuilderException { if (buConf.getProperty(HazelcastProperties.USE_DEFAULT_OR_XML_CONFIG) != null) { String[] iFacesArr = buConf.getStringArray(HazelcastProperties.INTERFACES); if (iFacesArr.length != 0) { log.info("-- Using Hazelcast network interfaces: " + Joiner.on(", ").join(iFacesArr)); Integer prop = buConf.getInteger(HazelcastProperties.PORT, -1); if (prop != -1) { log.info("-- Using Hazelcast port: " + prop); cfg.setBackupCount(buConf.getInt(HazelcastProperties.BACKUP_COUNT)); cfg.setMergePolicy("hz.LATEST_UPDATE"); if (buConf.getBoolean(HazelcastProperties.DISABLE_WAIT_WHEN_JOINING, false)) { log.info("Disabling Hazelcast join wait time."); hzConfig.setProperty("hazelcast.wait.seconds.before.join", "0");
public void runForever(int nDNodes) throws Exception { SploutConfiguration config = SploutConfiguration.getTestConfig(); for (int i = 0; i < nDNodes; i++) { config = SploutConfiguration.getTestConfig(); // we need to change some props for avoiding conflicts, ports, etc config.setProperty(DNodeProperties.PORT, config.getInt(DNodeProperties.PORT) + i); config.setProperty(DNodeProperties.DATA_FOLDER, config.getString(DNodeProperties.DATA_FOLDER) + "-" + i); config.setProperty(FetcherProperties.TEMP_DIR, config.getString(FetcherProperties.TEMP_DIR) + "-" + i); DNode dnode = new DNode(config, new DNodeHandler()); dnode.init(); } }
thriftPort = config.getInt(DNodeProperties.PORT); try { serverTransport = new TNonblockingServerSocket(thriftPort); init = true; } catch (org.apache.thrift.transport.TTransportException e) { if (!config.getBoolean(DNodeProperties.PORT_AUTOINCREMENT)) { throw e; config.setProperty(DNodeProperties.PORT, thriftPort + 1); retries++; args.executorService(Executors.newFixedThreadPool(config.getInt(DNodeProperties.SERVING_THREADS))); args.processor(processor); if (handler instanceof DNodeHandler && !config.getBoolean(DNodeProperties.STREAMING_API_DISABLE)) { do { streamer = new TCPStreamer(); int tcpPort = config.getInteger(DNodeProperties.STREAMING_PORT, 8888); try { streamer.start(config, (DNodeHandler) handler); init = true; } catch (BindException e) { if (!config.getBoolean(DNodeProperties.PORT_AUTOINCREMENT)) { throw e; config.setProperty(DNodeProperties.STREAMING_PORT, tcpPort + 1); retries++;
public Fetcher(SploutConfiguration config) { tempDir = new File(config.getString(FetcherProperties.TEMP_DIR)); accessKey = config.getString(FetcherProperties.S3_ACCESS_KEY, null); secretKey = config.getString(FetcherProperties.S3_SECRET_KEY, null); downloadBufferSize = config.getInt(FetcherProperties.DOWNLOAD_BUFFER); bytesPerSecThrottle = config.getInt(FetcherProperties.BYTES_PER_SEC_THROTTLE); bytesToReportProgress = config.getLong(FetcherProperties.BYTES_TO_REPORT_PROGRESS); String fsName = config.getString(FetcherProperties.HADOOP_FS_NAME); hadoopConf = new Configuration(); if (fsName != null) { hadoopConf.set("fs.default.name", fsName); } log.info("Created " + Fetcher.class + " with tempDir = " + tempDir); if (bytesPerSecThrottle > 0) { log.info("Throttling at: " + bytesPerSecThrottle + " bytes per sec."); } else { log.warn("No throttling. Fetched data will be transferred at full speed. This may affect query servicing."); } }
SploutConfiguration properties = new SploutConfiguration(); PropertiesConfiguration config = load(rootDir, SPLOUT_PROPERTIES, false); if(config != null) { properties.addConfiguration(config); config = load(rootDir, SPLOUT_PROPERTIES + ".default", true); properties.addConfiguration(config);
protected static void configureTCP(SploutConfiguration buConf, JoinConfig join) throws HazelcastConfigBuilderException { log.info("Configuring SploutSQL for TCP/IP Hazelcast join."); join.getTcpIpConfig().setEnabled(true); join.getTcpIpConfig().setConnectionTimeoutSeconds( buConf.getInt(HazelcastProperties.TCP_CONNECTION_TIMEOUT_SECONDS, 20)); String[] tcpCluster = buConf.getStringArray(HazelcastProperties.TCP_CLUSTER); if (tcpCluster.length == 0) { throw new HazelcastConfigBuilderException("Enabled TCP join method but missing TCP Cluster key property (" + HazelcastProperties.TCP_CLUSTER + ")"); } try { // Comma separated hosts accepted. ArrayList<String> members = new ArrayList<String>(); members.add(Joiner.on(",").join(tcpCluster)); join.getTcpIpConfig().setMembers(members); } catch (Throwable e) { log.error("Invalid host in TCP cluster", e); throw new HazelcastConfigBuilderException("Invalid host in TCP cluster: " + tcpCluster); } String requiredMember = buConf.getString(HazelcastProperties.TCP_CLUSTER_REQUIRED_MEMBER); if (requiredMember != null) { try { join.getTcpIpConfig().setRequiredMember(requiredMember); } catch (Throwable e) { log.error("Invalid required host in TCP cluster", e); throw new HazelcastConfigBuilderException("Invalid required host in TCP cluster: " + requiredMember); } } }
/**
 * Warming-up task: sleeps for the configured warming time (seconds) and then
 * clears the warming flag so the QNode starts operating fully normally.
 */
@Override
public void run() {
  try {
    log.info("Currently warming up for [" + config.getInt(QNodeProperties.WARMING_TIME)
        + "] - certain actions will only be taken afterwards.");
    // WARMING_TIME is expressed in seconds.
    Thread.sleep(config.getInt(QNodeProperties.WARMING_TIME) * 1000);
    log.info("Warming time ended [OK] Now the QNode will operate fully normally.");
  } catch (InterruptedException e) {
    // BUGFIX: restore the interrupt status and keep the cause in the log
    // instead of silently dropping the exception object.
    Thread.currentThread().interrupt();
    log.error("Warming time interrupted - ", e);
  }
  context.getIsWarming().set(false);
}
};
/**
 * Creates the handler context: wires the coordination structures, reads the
 * DNode thrift pool sizing / timeout properties, creates the replica balancer
 * and registers the metrics.
 *
 * @param config                 Splout configuration
 * @param coordinationStructures shared cluster coordination structures
 */
public QNodeHandlerContext(SploutConfiguration config, CoordinationStructures coordinationStructures) {
  this.config = config;
  this.coordinationStructures = coordinationStructures;
  thriftClientPoolSize = config.getInt(QNodeProperties.DNODE_POOL_SIZE);
  dnodePoolTimeoutMillis = config.getLong(QNodeProperties.QNODE_DNODE_POOL_TAKE_TIMEOUT);
  replicaBalancer = new ReplicaBalancer(this);
  initMetrics();
}
public void maybeBalance() { // do this only after warming if (!isWarming.get() && config.getBoolean(QNodeProperties.REPLICA_BALANCE_ENABLE)) { // check if we could balance some partitions List<ReplicaBalancer.BalanceAction> balanceActions = getBalanceActions(); // we will only re-balance versions being served // otherwise strange things may happen: to re-balance a version in the middle of its deployment... Map<String, Long> versionsBeingServed = coordinationStructures.getCopyVersionsBeingServed(); for (ReplicaBalancer.BalanceAction action : balanceActions) { if (versionsBeingServed != null && versionsBeingServed.get(action.getTablespace()) != null && versionsBeingServed.get(action.getTablespace()) == action.getVersion()) { // put if absent + TTL coordinationStructures.getDNodeReplicaBalanceActionsSet().putIfAbsent(action, "", config.getLong(QNodeProperties.BALANCE_ACTIONS_TTL), TimeUnit.SECONDS); } } } }
/**
 * Copies into hzConfig every property of the Splout configuration whose key
 * starts with "hazelcast." - multi-valued entries are joined with commas.
 *
 * @param buConf   Splout configuration to read from
 * @param hzConfig Hazelcast configuration to populate
 */
protected static void loadHazelcastRelatedConfig(SploutConfiguration buConf, Config hzConfig) {
  for (Iterator<String> keys = buConf.getKeys("hazelcast"); keys.hasNext(); ) {
    String key = keys.next();
    hzConfig.setProperty(key, Joiner.on(",").join(buConf.getStringArray(key)));
  }
}
}
/**
 * Configures Hazelcast multicast join from the Splout configuration, applying
 * the multicast group and port only when they are explicitly set.
 *
 * @param buConf Splout configuration to read the multicast properties from
 * @param join   Hazelcast join configuration to populate
 */
protected static void configureMulticast(SploutConfiguration buConf, JoinConfig join) {
  log.info("Configuring SploutSQL for MULTICAST Hazelcast join.");
  join.getMulticastConfig().setEnabled(true);
  String multicastGroup = buConf.getString(HazelcastProperties.MULTICAST_GROUP);
  if (multicastGroup != null) {
    log.info("-- Using multicast group: " + multicastGroup);
    join.getMulticastConfig().setMulticastGroup(multicastGroup);
  }
  Integer multicastPort = buConf.getInteger(HazelcastProperties.MULTICAST_PORT, -1);
  if (multicastPort != -1) {
    log.info("-- Using multicast port: " + multicastPort);
    join.getMulticastConfig().setMulticastPort(multicastPort);
  }
}
/** Returns the Splout configuration loaded from the current working directory ("."). */
public static SploutConfiguration get() { return get("."); }
/**
 * Creates a Querier bound to the given handler context. When the
 * DISABLE_BINARY_PROTOCOL property is set, binary protocol usage is switched
 * off; otherwise the field keeps its default value (declared elsewhere -
 * presumably true; verify against the field initializer).
 *
 * @param context QNode handler context providing the configuration
 */
public Querier(QNodeHandlerContext context) { super(context); if (context.getConfig().getBoolean(QNodeProperties.DISABLE_BINARY_PROTOCOL)) { this.useBinaryProtocol = false; } }
/**
 * Initializes the handler: stores the configuration and starts the query
 * timeout watchdog thread, configured with the MAX_QUERY_TIME property.
 *
 * @param config Splout configuration to read MAX_QUERY_TIME from
 */
public void init(SploutConfiguration config) { this.config = config; timeoutThread = new TimeoutThread(config.getLong(DNodeProperties.MAX_QUERY_TIME)); timeoutThread.start(); }