/**
 * Returns the address (host:port) of this DNode.
 */
public String whoAmI() {
  // Host and port both come from the DNode section of the configuration.
  String host = config.getString(DNodeProperties.HOST);
  int port = config.getInt(DNodeProperties.PORT);
  return host + ":" + port;
}
protected static void configureTCP(SploutConfiguration buConf, JoinConfig join) throws HazelcastConfigBuilderException { log.info("Configuring SploutSQL for TCP/IP Hazelcast join."); join.getTcpIpConfig().setEnabled(true); join.getTcpIpConfig().setConnectionTimeoutSeconds( buConf.getInt(HazelcastProperties.TCP_CONNECTION_TIMEOUT_SECONDS, 20)); String[] tcpCluster = buConf.getStringArray(HazelcastProperties.TCP_CLUSTER); if (tcpCluster.length == 0) { throw new HazelcastConfigBuilderException("Enabled TCP join method but missing TCP Cluster key property (" + HazelcastProperties.TCP_CLUSTER + ")"); } try { // Comma separated hosts accepted. ArrayList<String> members = new ArrayList<String>(); members.add(Joiner.on(",").join(tcpCluster)); join.getTcpIpConfig().setMembers(members); } catch (Throwable e) { log.error("Invalid host in TCP cluster", e); throw new HazelcastConfigBuilderException("Invalid host in TCP cluster: " + tcpCluster); } String requiredMember = buConf.getString(HazelcastProperties.TCP_CLUSTER_REQUIRED_MEMBER); if (requiredMember != null) { try { join.getTcpIpConfig().setRequiredMember(requiredMember); } catch (Throwable e) { log.error("Invalid required host in TCP cluster", e); throw new HazelcastConfigBuilderException("Invalid required host in TCP cluster: " + requiredMember); } } }
/**
 * Returns the file where the DNode that uses the provided Configuration will
 * store the metadata for this tablespace, version and partition.
 *
 * @param config     the DNode configuration (provides the data folder root)
 * @param tablespace the tablespace name
 * @param partition  the partition id
 * @param version    the tablespace version
 * @return the local metadata file for the given tablespace/partition/version
 */
public static File getLocalMetadataFile(SploutConfiguration config, String tablespace, int partition, long version) {
  String dataFolder = config.getString(DNodeProperties.DATA_FOLDER);
  // Use the (parent, child) File constructor instead of manual "/" concatenation
  // so the platform's separator handling stays with java.io.File.
  return new File(dataFolder, getLocalMetadataFileRelativePath(tablespace, partition, version));
}
/**
 * Returns the address (host:port) of this DNode.
 */
public String getAddress() {
  StringBuilder address = new StringBuilder();
  address.append(config.getString(DNodeProperties.HOST))
      .append(':')
      .append(config.getInt(DNodeProperties.PORT));
  return address.toString();
}
/**
 * Returns the folder where the DNode that uses the provided Configuration
 * will store the binary data for this tablespace, version and partition.
 *
 * @param config     the DNode configuration (provides the data folder root)
 * @param tablespace the tablespace name
 * @param partition  the partition id
 * @param version    the tablespace version
 * @return the local storage folder for the given tablespace/partition/version
 */
public static File getLocalStorageFolder(SploutConfiguration config, String tablespace, int partition, long version) {
  String dataFolder = config.getString(DNodeProperties.DATA_FOLDER);
  // Use the (parent, child) File constructor instead of manual "/" concatenation
  // so the platform's separator handling stays with java.io.File.
  return new File(dataFolder, getLocalStoragePartitionRelativePath(tablespace, partition, version));
}
/** Returns the HTTP base URL (http://host:port) of this file exchanger. */
public String address() {
  final String host = config.getString(DNodeProperties.HOST);
  final int httpPort = config.getInt(HttpFileExchangerProperties.HTTP_PORT);
  return "http://" + host + ":" + httpPort;
}
/** Returns the HTTP base URL (http://host:port) used for file exchange. */
public String httpExchangerAddress() {
  StringBuilder url = new StringBuilder("http://");
  url.append(config.getString(DNodeProperties.HOST));
  url.append(':');
  url.append(config.getInt(HttpFileExchangerProperties.HTTP_PORT));
  return url.toString();
}
/** Returns the host:port address of this DNode's streaming (TCP) API. */
public String getTCPAPIAddress() {
  final int streamingPort = config.getInt(DNodeProperties.STREAMING_PORT);
  return config.getString(DNodeProperties.HOST) + ":" + streamingPort;
}
public Fetcher(SploutConfiguration config) { tempDir = new File(config.getString(FetcherProperties.TEMP_DIR)); accessKey = config.getString(FetcherProperties.S3_ACCESS_KEY, null); secretKey = config.getString(FetcherProperties.S3_SECRET_KEY, null); downloadBufferSize = config.getInt(FetcherProperties.DOWNLOAD_BUFFER); bytesPerSecThrottle = config.getInt(FetcherProperties.BYTES_PER_SEC_THROTTLE); bytesToReportProgress = config.getLong(FetcherProperties.BYTES_TO_REPORT_PROGRESS); String fsName = config.getString(FetcherProperties.HADOOP_FS_NAME); hadoopConf = new Configuration(); if (fsName != null) { hadoopConf.set("fs.default.name", fsName); } log.info("Created " + Fetcher.class + " with tempDir = " + tempDir); if (bytesPerSecThrottle > 0) { log.info("Throttling at: " + bytesPerSecThrottle + " bytes per sec."); } else { log.warn("No throttling. Fetched data will be transferred at full speed. This may affect query servicing."); } }
private void updateLocalTablespace(Map<String, Long> tablespacesAndVersions) throws IOException { log.info("Update local in-memory tablespace versions to serve: " + tablespacesAndVersions); if (tablespacesAndVersions == null) { return; } // CAREFUL TODO: That is not atomic. Something should // be done to make that update atomic. context.getCurrentVersionsMap().putAll(tablespacesAndVersions); String persistenceFolder = config.getString(HazelcastProperties.HZ_PERSISTENCE_FOLDER); if (persistenceFolder != null && !persistenceFolder.equals("")) { TablespaceVersionStore vStore = new TablespaceVersionStore(persistenceFolder); vStore.store(CoordinationStructures.KEY_FOR_VERSIONS_BEING_SERVED, tablespacesAndVersions); } }
// Resolve the on-disk layout for this tablespace version:
// <dataFolder>/<tablespace>/<version>. NOTE(review): fragment of a larger
// method — `config` and `version` come from the enclosing scope.
File dataFolder = new File(config.getString(DNodeProperties.DATA_FOLDER)); File tablespaceFolder = new File(dataFolder, version.getTablespace()); File versionFolder = new File(tablespaceFolder, version.getVersion() + "");
/**
 * Configures the Hazelcast multicast join method from the Splout
 * configuration. Group and port are optional; Hazelcast defaults apply when
 * they are not set.
 */
protected static void configureMulticast(SploutConfiguration buConf, JoinConfig join) {
  log.info("Configuring SploutSQL for MULTICAST Hazelcast join.");
  join.getMulticastConfig().setEnabled(true);
  // Optional multicast group override.
  String multicastGroup = buConf.getString(HazelcastProperties.MULTICAST_GROUP);
  if (multicastGroup != null) {
    log.info("-- Using multicast group: " + multicastGroup);
    join.getMulticastConfig().setMulticastGroup(multicastGroup);
  }
  // Optional multicast port override; -1 is the "not configured" sentinel.
  Integer multicastPort = buConf.getInteger(HazelcastProperties.MULTICAST_PORT, -1);
  if (multicastPort != -1) {
    log.info("-- Using multicast port: " + multicastPort);
    join.getMulticastConfig().setMulticastPort(multicastPort);
  }
}
protected static void configureAWS(SploutConfiguration buConf, JoinConfig join) throws HazelcastConfigBuilderException { log.info("Configuring Splout for AWS auto-discovery Hazelcast join (http://www.hazelcast.com/docs/1.9.4/manual/multi_html/ch11s02.html)."); join.getAwsConfig().setEnabled(true); String key = buConf.getString(HazelcastProperties.AWS_KEY); if (key == null) { throw new HazelcastConfigBuilderException("Missing AWS Key property (" + HazelcastProperties.AWS_KEY + ")"); } String secretKey = buConf.getString(HazelcastProperties.AWS_SECRET); if (secretKey == null) { throw new HazelcastConfigBuilderException("Missing AWS Secret Key property (" + HazelcastProperties.AWS_SECRET + ")"); } join.getAwsConfig().setAccessKey(key); join.getAwsConfig().setSecretKey(secretKey); // Optionally add the security group String securityGroup = buConf.getString(HazelcastProperties.AWS_SECURITY_GROUP); if (securityGroup != null) { log.info("-- Using security group: " + securityGroup); join.getAwsConfig().setSecurityGroupName(securityGroup); } }
// Fragment of a larger method (the try block continues past this view).
// Creates the temp dir handle and binds an HttpServer on HOST:HTTP_PORT with
// the configured backlog. NOTE(review): `trials` presumably counts bind
// retries on port conflicts — the retry loop is outside this view; confirm.
tempDir = new File(config.getString(FetcherProperties.TEMP_DIR)); int httpPort = 0; int trials = 0; try { httpPort = config.getInt(HttpFileExchangerProperties.HTTP_PORT); server = HttpServer.create(new InetSocketAddress(config.getString(DNodeProperties.HOST), httpPort), config.getInt(HttpFileExchangerProperties.HTTP_BACKLOG)); bind = true;
// Fragment of a larger method (the if block continues past this view).
// Opens the on-disk tablespace-version store only when a persistence folder
// is configured (non-null, non-empty).
String persistenceFolder = config.getString(HazelcastProperties.HZ_PERSISTENCE_FOLDER); if (persistenceFolder != null && !persistenceFolder.equals("")) { TablespaceVersionStore vStore = new TablespaceVersionStore(persistenceFolder);
public void runForever(int nDNodes) throws Exception { SploutConfiguration config = SploutConfiguration.getTestConfig(); for (int i = 0; i < nDNodes; i++) { config = SploutConfiguration.getTestConfig(); // we need to change some props for avoiding conflicts, ports, etc config.setProperty(DNodeProperties.PORT, config.getInt(DNodeProperties.PORT) + i); config.setProperty(DNodeProperties.DATA_FOLDER, config.getString(DNodeProperties.DATA_FOLDER) + "-" + i); config.setProperty(FetcherProperties.TEMP_DIR, config.getString(FetcherProperties.TEMP_DIR) + "-" + i); DNode dnode = new DNode(config, new DNodeHandler()); dnode.init(); } }
public void runForever(int nDNodes) throws Exception { SploutConfiguration config = SploutConfiguration.get(); for (int i = 0; i < nDNodes; i++) { config = SploutConfiguration.get(); // we need to change some props for avoiding conflicts, ports, etc config.setProperty(DNodeProperties.PORT, config.getInt(DNodeProperties.PORT) + i); config.setProperty(DNodeProperties.DATA_FOLDER, config.getString(DNodeProperties.DATA_FOLDER) + "-" + i); config.setProperty(FetcherProperties.TEMP_DIR, config.getString(FetcherProperties.TEMP_DIR) + "-" + i); DNode dnode = new DNode(config, new DNodeHandler()); dnode.init(); } QNode qnode = new QNode(); qnode.start(config, new QNodeHandler()); }
// Fragment of a larger method (the if block continues past this view).
// Fills in the DNode status: TCP API address, balance-actions state, and —
// when the data folder exists — the free disk space in KB.
status.setTcpAddress(getTCPAPIAddress()); status.setBalanceActionsStateMap(balanceActionsStateMap); File folder = new File(config.getString(DNodeProperties.DATA_FOLDER)); if (folder.exists()) { status.setFreeSpaceInDisk(FileSystemUtils.freeSpaceKb(folder.toString()));
protected static void configureJoinMethod(SploutConfiguration buConf, Config hzConfig) throws HazelcastConfigBuilderException { String joinMethod = buConf.getString(HazelcastProperties.JOIN_METHOD); if (joinMethod == null) { throw new HazelcastConfigBuilderException("No join method specified in configuration, must be one of: " + Arrays.toString(HazelcastProperties.JOIN_METHODS.values())); } joinMethod = joinMethod.toUpperCase(); HazelcastProperties.JOIN_METHODS method; try { method = HazelcastProperties.JOIN_METHODS.valueOf(joinMethod); } catch (IllegalArgumentException e) { throw new HazelcastConfigBuilderException("Invalid join method: " + joinMethod + " must be one of: " + Arrays.toString(HazelcastProperties.JOIN_METHODS.values())); } NetworkConfig network = hzConfig.getNetworkConfig(); JoinConfig join = network.getJoin(); // Disable all by default. Then we enable the correct one. join.getMulticastConfig().setEnabled(false); join.getTcpIpConfig().setEnabled(false); join.getAwsConfig().setEnabled(false); if (method.equals(HazelcastProperties.JOIN_METHODS.AWS)) { configureAWS(buConf, join); } else if (method.equals(HazelcastProperties.JOIN_METHODS.TCP)) { configureTCP(buConf, join); } else { configureMulticast(buConf, join); } }
// Fragment of a larger method. Starts the HTTP server and records the QNode's
// public base URL built from the QNode host/port configuration.
server.start(); address = "http://" + config.getString(QNodeProperties.HOST) + ":" + config.getInt(QNodeProperties.PORT);