private void setupElasticsearchConf(InstanceConfiguration config) throws IOException { String pathConf = config.getClusterConfiguration().getPathConf(); if (pathConf != null && !pathConf.isEmpty()) { // Merge the user-defined config directory with the default one // This allows user to omit some configuration files (jvm.options for instance) File baseDir = new File(config.getBaseDir()); FileUtils.copyDirectory(new File(pathConf), new File(baseDir, "config")); } }
/**
 * Creates the {@link InstanceConfiguration} from the values collected by
 * this builder.
 *
 * @return a new, fully populated instance configuration
 */
public InstanceConfiguration build() {
    InstanceConfiguration result = new InstanceConfiguration();
    result.clusterConfiguration = clusterConfiguration;
    result.id = id;
    result.baseDir = baseDir;
    result.httpPort = httpPort;
    result.transportPort = transportPort;
    result.pathData = pathData;
    result.pathLogs = pathLogs;
    result.settings = settings;
    return result;
}
}
/**
 * Copies the client-relevant settings (HTTP port, socket timeout, log) from
 * the given instance configuration into this builder.
 *
 * @param config the instance configuration to read from
 * @return this builder, for chaining
 */
public Builder withInstanceConfiguration(InstanceConfiguration config) {
    this.port = config.getHttpPort();
    this.socketTimeout = config.getClusterConfiguration().getClientSocketTimeout();
    this.log = config.getClusterConfiguration().getLog();
    return this;
}
@Override public void execute(ClusterConfiguration config) { List<Integer> httpPorts = new ArrayList<>(); List<Integer> transportPorts = new ArrayList<>(); config.getInstanceConfigurationList().forEach(instanceConfig -> { httpPorts.add(instanceConfig.getHttpPort()); transportPorts.add(instanceConfig.getTransportPort()); }); // create additional sets to verify that there are no duplicates within the source lists Set<Integer> httpPortsSet = new HashSet<>(httpPorts); Set<Integer> transportPortsSet = new HashSet<>(transportPorts); Set<Integer> intersection = new HashSet<>(httpPortsSet); intersection.retainAll(transportPortsSet); if (httpPortsSet.size() != httpPorts.size() || transportPortsSet.size() != transportPorts.size() || intersection.size() > 0) { throw new ElasticsearchSetupException( "We have conflicting ports in the list of HTTP ports [" + StringUtils.join(httpPorts, ',') + "] and the list of transport ports [" + StringUtils.join(transportPorts, ',') + "]"); } }
"-Ecluster.name=" + config.getClusterConfiguration().getClusterName(), false); cmd.addArgument("-Ehttp.port=" + config.getHttpPort(), false); cmd.addArgument("-Etransport.tcp.port=" + config.getTransportPort(), false); List<String> hosts = config.getClusterConfiguration().getInstanceConfigurationList() .stream() .filter(config -> config != this.config) .map(config -> "127.0.0.1:" + config.getTransportPort()) .collect(Collectors.toList()); if (hosts.isEmpty() == false) if (config.getClusterConfiguration().isAutoCreateIndex() == false) if (config.getSettings() != null) config.getSettings().forEach((key, value) -> cmd.addArgument("-E" + key + '=' + value));
boolean disableLogging) Log log = config.getClusterConfiguration().getLog(); int instanceId = config.getId(); File baseDir = new File(config.getBaseDir());
/**
 * Registers a JVM shutdown hook that terminates the forked Elasticsearch
 * process when the build JVM exits.
 *
 * @param config the configuration of the instance to terminate on shutdown
 */
public ForkedElasticsearchProcessDestroyer(final InstanceConfiguration config) {
    this.log = config.getClusterConfiguration().getLog();
    // lambda is equivalent to the anonymous Thread-with-run() form
    Runtime.getRuntime().addShutdownHook(new Thread(() -> terminateProcess(config)));
}
try pid = ProcessUtil.getElasticsearchPid(config.getBaseDir()); log.debug("Read PID '" + pid + "' from pid file"); config.getId())); config.getId())); ProcessUtil.cleanupPid(config.getBaseDir()); config.getId())); ProcessUtil.cleanupPid(config.getBaseDir()); config.getId()), e);
@Override public void execute(ClusterConfiguration config) { String baseDir = config.getInstanceConfigurationList().get(0).getBaseDir(); try { Validate.notBlank(baseDir); new File(baseDir).getCanonicalPath(); // this should catch erroneous paths } catch (Exception e) { throw new ElasticsearchSetupException(String.format( "The value of the 'baseDir' parameter ('%1$s') is not a valid file path.", baseDir)); } }
/**
 * Shutdown-hook cleanup for the forked Elasticsearch process on Unix: if the
 * process is still alive it is destroyed and waited for; otherwise there is
 * nothing to do.
 *
 * @param config the configuration of the instance being cleaned up (used for log messages)
 */
private void terminateUnixProcess(InstanceConfiguration config) {
    log.info("Cleaning up at application shutdown...");
    if (process.isAlive()) {
        log.info("The Elasticsearch process is still running; stopping it ...");
        process.destroy();
        try {
            int exitCode = process.waitFor();
            log.info(String.format(
                    "... the Elasticsearch process [%d] has stopped. Exit code: %d",
                    config.getId(), exitCode));
        } catch (InterruptedException e) {
            // Fix: restore the interrupt flag so callers up the stack can
            // observe the interruption (it was previously swallowed).
            Thread.currentThread().interrupt();
            log.error(
                    String.format(
                            "Error while waiting for the Elasticsearch process [%d] to be destroyed",
                            config.getId()), e);
        }
    } else {
        log.info("The Elasticsearch process has already stopped. Nothing to clean up");
    }
}
/**
 * Creates the {@link ClusterConfiguration} from the values collected by this
 * builder and wires every instance configuration back to the built cluster.
 *
 * @return a new, fully populated cluster configuration
 */
public ClusterConfiguration build() {
    final ClusterConfiguration result = new ClusterConfiguration(
            instanceConfigurationList, artifactResolver, artifactInstaller, log);
    result.flavour = flavour;
    result.version = version;
    result.downloadUrl = downloadUrl;
    result.clusterName = clusterName;
    result.pathConf = pathConf;
    result.plugins = plugins;
    result.pathInitScript = pathInitScript;
    result.keepExistingData = keepExistingData;
    result.timeout = timeout;
    result.clientSocketTimeout = clientSocketTimeout;
    result.setAwait = setAwait;
    result.autoCreateIndex = autoCreateIndex;
    // back-reference: each instance needs to know the cluster it belongs to
    result.getInstanceConfigurationList().forEach(c -> c.setClusterConfiguration(result));
    return result;
}
}
/**
 * Resolves the Elasticsearch artifact, unpacks it into the instance's base
 * directory and overlays the user-supplied configuration. The temporary
 * unpack directory is cleaned up in all cases.
 *
 * @param config the instance to set up
 */
@Override
public void execute(InstanceConfiguration config) {
    File unpackDirectory = null;
    try {
        final File artifact = resolveArtifact(config.getClusterConfiguration());
        unpackDirectory = unpackToElasticsearchDirectory(artifact, config);
        setupElasticsearchConf(config);
    } catch (ArtifactException | IOException e) {
        // surface resolution/unpack failures to the caller
        throw new RuntimeException(e);
    } finally {
        cleanUp(unpackDirectory, config.getClusterConfiguration());
    }
}
@Override public void execute(ClusterConfiguration config) { List<Integer> ports = new ArrayList<>(); // Iterate twice, because I want to maintain the order: // HTTP ports first, then transport ports config.getInstanceConfigurationList().forEach(instanceConfig -> { ports.add(instanceConfig.getHttpPort()); }); config.getInstanceConfigurationList().forEach(instanceConfig -> { ports.add(instanceConfig.getTransportPort()); }); List<Integer> protectedPorts = ports.stream() .filter(port -> port < 1024) .collect(Collectors.toList()); if (protectedPorts.size() > 0) { throw new ElasticsearchSetupException(String.format( "The following provided or inferred ports are protected (below 1024): %s", StringUtils.join(protectedPorts, ','))); } }
/**
 * Stops every configured Elasticsearch instance by invoking its shutdown
 * script and removing its pid file. A failure for one instance is logged and
 * does not prevent stopping the remaining instances.
 */
@Override
public void execute() throws MojoExecutionException, MojoFailureException {
    if (skip) {
        getLog().info("Skipping plugin execution");
        return;
    }
    ClusterConfiguration clusterConfig = buildClusterConfiguration();
    for (InstanceConfiguration config : clusterConfig.getInstanceConfigurationList()) {
        stopInstance(config);
    }
}

/** Stops a single instance, logging (but not propagating) any failure. */
private void stopInstance(InstanceConfiguration config) {
    try {
        getLog().info(String.format("Stopping Elasticsearch [%s]", config));
        String baseDir = config.getBaseDir();
        ProcessUtil.executeScript(config, getShutdownScriptCommand(baseDir));
        getLog().info(String.format("Elasticsearch [%d] stopped", config.getId()));
        ProcessUtil.cleanupPid(baseDir);
    } catch (Exception e) {
        getLog().error("Exception while stopping Elasticsearch", e);
    }
}
/**
 * Unpacks the downloaded artifact into a temporary directory and moves its
 * contents into the instance's base directory.
 *
 * @param artifact the archive to unpack
 * @param config the instance whose base directory receives the files
 * @return the temporary unpack directory, so callers can clean it up
 * @throws IOException if unpacking or moving fails
 */
private File unpackToElasticsearchDirectory(File artifact, InstanceConfiguration config)
        throws IOException {
    final File unpackDirectory = getUnpackDirectory();
    ZipUtil.unpack(artifact, unpackDirectory);
    moveToElasticsearchDirectory(unpackDirectory, new File(config.getBaseDir()));
    return unpackDirectory;
}
config.getId(), config));
/**
 * Blocks until the freshly started Elasticsearch instance reports itself as
 * up, or the configured timeout expires.
 *
 * @param config the instance to wait for
 */
@Override
public void execute(InstanceConfiguration config) {
    int timeout = config.getClusterConfiguration().getTimeout();
    ElasticsearchClient client = new ElasticsearchClient.Builder()
            .withInstanceConfiguration(config)
            .withHostname("localhost")
            .build();
    new Monitor(client, config.getClusterConfiguration().getLog())
            .waitToStartInstance(
                    config.getBaseDir(),
                    config.getClusterConfiguration().getClusterName(),
                    timeout);
}
}
/**
 * Wires the mocks together before each test: the instance configuration
 * points to the mocked cluster configuration, which in turn exposes a
 * single-instance list and the mocked log.
 */
@Before
public void setup() {
    when(instanceConfig.getClusterConfiguration()).thenReturn(config);
    when(config.getInstanceConfigurationList()).thenReturn(Arrays.asList(instanceConfig));
    when(config.getLog()).thenReturn(log);
}
@Override public void execute(InstanceConfiguration config) Log log = config.getClusterConfiguration().getLog(); File pluginsDir = new File(config.getBaseDir(), "plugins"); try if (VersionUtil.isEqualOrGreater_6_4_0(config.getClusterConfiguration().getVersion()))
@Override public void execute(InstanceConfiguration config) if (config.getClusterConfiguration().getPlugins().size() > 0) if (VersionUtil.isEqualOrGreater_6_4_0(config.getClusterConfiguration().getVersion())) Log log = config.getClusterConfiguration().getLog(); for (PluginConfiguration plugin : config.getClusterConfiguration().getPlugins())