/**
 * "Creates" a cluster by pointing at an already-running remote Hadoop host.
 * Nothing is actually provisioned: the configured host is wrapped in a single
 * MASTER node and the cluster is reported as RUNNING immediately.
 *
 * @param context provisioner context carrying the remote-hadoop properties and SSH context
 * @return a one-node cluster descriptor for the configured host, already in RUNNING state
 */
@Override
public Cluster createCluster(ProvisionerContext context) {
  RemoteHadoopConf conf = RemoteHadoopConf.fromProperties(context.getProperties());
  // Register the pre-configured key pair so later SSH operations can reach the host.
  context.getSSHContext().setSSHKeyPair(conf.getKeyPair());

  // The remote host doubles as the cluster name, the node id, and the node address.
  String host = conf.getHost();
  Node masterNode = new Node(host, Node.Type.MASTER, host, 0, Collections.emptyMap());
  return new Cluster(host, ClusterStatus.RUNNING, Collections.singletonList(masterNode),
                     Collections.emptyMap());
}
@Override public Cluster createCluster(ProvisionerContext context) throws Exception { if (!SparkCompat.SPARK2_2_11.equals(context.getSparkCompat())) { throw new UnsupportedOperationException("EMR currently only supports " + SparkCompat.SPARK2_2_11); } // Generates and set the ssh key SSHKeyPair sshKeyPair = context.getSSHContext().generate("ec2-user"); // or 'hadoop' context.getSSHContext().setSSHKeyPair(sshKeyPair); EMRConf conf = EMRConf.fromProvisionerContext(context); String clusterName = getClusterName(context.getProgramRun()); try (EMRClient client = EMRClient.fromConf(conf)) { // if it already exists, it means this is a retry. We can skip actually making the request Optional<ClusterSummary> existing = client.getUnterminatedClusterByName(clusterName); if (existing.isPresent()) { return client.getCluster(existing.get().getId()).get(); } String clusterId = client.createCluster(clusterName); return new Cluster(clusterId, ClusterStatus.CREATING, Collections.emptyList(), Collections.emptyMap()); } }
@Override public Cluster createCluster(ProvisionerContext context) throws Exception { // Generates and set the ssh key SSHKeyPair sshKeyPair = context.getSSHContext().generate("cdap"); context.getSSHContext().setSSHKeyPair(sshKeyPair); DataprocConf conf = DataprocConf.fromProvisionerContext(context); String clusterName = getClusterName(context.getProgramRun()); try (DataprocClient client = DataprocClient.fromConf(conf)) { // if it already exists, it means this is a retry. We can skip actually making the request Optional<Cluster> existing = client.getCluster(clusterName); if (existing.isPresent()) { return existing.get(); } String imageVersion; switch (context.getSparkCompat()) { case SPARK1_2_10: imageVersion = "1.0"; break; case SPARK2_2_11: default: imageVersion = "1.2"; break; } client.createCluster(clusterName, imageVersion, systemLabels); return new Cluster(clusterName, ClusterStatus.CREATING, Collections.emptyList(), Collections.emptyMap()); } }