Codota Logo
AddServicesRequest
Code IndexAdd Codota to your IDE (free)

How to use
AddServicesRequest
in
co.cask.coopr.http.request

Best Java code snippets using co.cask.coopr.http.request.AddServicesRequest (Showing top 9 results out of 315)

  • Add the Codota plugin to your IDE and get smart completions
private void myMethod () {
ScheduledThreadPoolExecutor s =
  • Codota Iconnew ScheduledThreadPoolExecutor(corePoolSize)
  • Codota IconThreadFactory threadFactory;new ScheduledThreadPoolExecutor(corePoolSize, threadFactory)
  • Codota IconString str;new ScheduledThreadPoolExecutor(1, new ThreadFactoryBuilder().setNameFormat(str).build())
  • Smart code suggestions by Codota
}
origin: caskdata/coopr

 /**
  * Deserializes an {@code AddServicesRequest} from its JSON form.
  *
  * Reads the "providerFields" object as a string-to-string map and the
  * "services" array as a set of service names, then passes both to the
  * {@code AddServicesRequest} constructor.
  *
  * @throws JsonParseException if the input is not a JSON object of the expected shape
  */
 @Override
 public AddServicesRequest deserialize(JsonElement json, Type type, JsonDeserializationContext context)
  throws JsonParseException {
  JsonObject jsonObj = json.getAsJsonObject();
  // NOTE(review): declared as Map<String, Object> but deserialized with a
  // TypeToken<Map<String, String>> — this compiles due to erasure, but any
  // non-string JSON value under "providerFields" will fail to deserialize.
  // Confirm whether Map<String, Object> was the intended element type.
  Map<String, Object> providerFields = context.deserialize(jsonObj.get("providerFields"),
                               new TypeToken<Map<String, String>>() { }.getType());
  Set<String> services = context.deserialize(jsonObj.get("services"), new TypeToken<Set<String>>() { }.getType());

  return new AddServicesRequest(providerFields, services);
 }
}
origin: caskdata/coopr

String servicesStr = Joiner.on(',').join(request.getServices());
try {
 changedNodes = solver.addServicesToCluster(cluster, clusterNodes, request.getServices());
} catch (IllegalArgumentException e) {
 LOG.debug("Could not add services {} to cluster {}.", servicesStr, cluster.getId(), e);
                   request.getServices(), changedNodeIds);
cluster.setLatestJobId(createJob.getJobId());
clusterStore.writeClusterJob(createJob);
origin: caskdata/coopr

@Test
public void testUsesExistingCredentials() throws Exception {
 Cluster cluster = createActiveCluster();
 // Seed the credential store with the required sensitive user field.
 Map<String, Object> storedCredentials = Maps.newHashMap();
 storedCredentials.put("key", "keycontents");
 credentialStore.set(account.getTenantId(), cluster.getId(), storedCredentials);
 // The request itself omits the required key field; it should be picked up
 // from the credential store, so this call must complete without throwing.
 AddServicesRequest request = new AddServicesRequest(null, ImmutableSet.of(service2.getName()));
 clusterService.requestAddServices(cluster.getId(), account, request);
}
origin: caskdata/coopr

solver.validateServicesToAdd(cluster, addRequest.getServices());
ClusterJob job = new ClusterJob(jobId, action, addRequest.getServices(), null);
job.setJobStatus(ClusterJob.Status.RUNNING);
cluster.setLatestJobId(job.getJobId());
origin: caskdata/coopr

@Test
public void testAddInvalidServicesReturns400() throws Exception {
 // An active cluster running HDFS services.
 Cluster cluster = Cluster.builder()
  .setID("123")
  .setAccount(USER1_ACCOUNT)
  .setName("test-cluster")
  .setClusterTemplate(Entities.ClusterTemplateExample.HDFS)
  .setServices(ImmutableSet.<String>of("namenode", "datanode"))
  .setStatus(Cluster.Status.ACTIVE)
  .build();
 clusterStoreService.getView(cluster.getAccount()).writeCluster(cluster);
 // nodemanager depends on resourcemanager, so adding it alone is rejected.
 AddServicesRequest missingDependency = new AddServicesRequest(null, ImmutableSet.of("nodemanager"));
 assertResponseStatus(doPostExternalAPI("/clusters/123/services", gson.toJson(missingDependency), USER1_HEADERS),
            HttpResponseStatus.BAD_REQUEST);
 // A service that does not exist at all is rejected as well.
 AddServicesRequest unknownService = new AddServicesRequest(null, ImmutableSet.of("fakeservice"));
 assertResponseStatus(doPostExternalAPI("/clusters/123/services", gson.toJson(unknownService), USER1_HEADERS),
            HttpResponseStatus.BAD_REQUEST);
}
origin: caskdata/coopr

@Test
public void testAddServicesCanOnlyRunOnActiveCluster() throws Exception {
 Cluster cluster = Cluster.builder()
  .setID("123")
  .setAccount(USER1_ACCOUNT)
  .setName("test-cluster")
  .setClusterTemplate(Entities.ClusterTemplateExample.HDFS)
  .setServices(ImmutableSet.<String>of("namenode", "datanode"))
  .build();
 AddServicesRequest request = new AddServicesRequest(null, ImmutableSet.of("resourcemanager", "nodemanager"));
 // Every non-active state must reject the add-services call with a conflict.
 Set<Cluster.Status> nonActiveStatuses = ImmutableSet.of(
  Cluster.Status.INCOMPLETE, Cluster.Status.PENDING, Cluster.Status.TERMINATED, Cluster.Status.INCONSISTENT);
 for (Cluster.Status status : nonActiveStatuses) {
  cluster.setStatus(status);
  clusterStoreService.getView(cluster.getAccount()).writeCluster(cluster);
  assertResponseStatus(doPostExternalAPI("/clusters/123/services", gson.toJson(request), USER1_HEADERS),
             HttpResponseStatus.CONFLICT);
 }
}
origin: caskdata/coopr

@Test
public void testAddServicesOnNonexistantClusterReturns404() throws Exception {
 Cluster cluster = Cluster.builder()
  .setID("123")
  .setAccount(USER1_ACCOUNT)
  .setName("test-cluster")
  .setClusterTemplate(Entities.ClusterTemplateExample.HDFS)
  .setServices(ImmutableSet.<String>of("namenode", "datanode"))
  .setStatus(Cluster.Status.ACTIVE)
  .build();
 clusterStoreService.getView(cluster.getAccount()).writeCluster(cluster);
 AddServicesRequest request = new AddServicesRequest(null, ImmutableSet.of("resourcemanager", "nodemanager"));
 // A cluster id that does not exist yields 404.
 assertResponseStatus(doPostExternalAPI("/clusters/1123/services", gson.toJson(request), USER1_HEADERS),
            HttpResponseStatus.NOT_FOUND);
 // The cluster exists but belongs to another user, which also yields 404.
 assertResponseStatus(doPostExternalAPI("/clusters/123/services", gson.toJson(request), USER2_HEADERS),
            HttpResponseStatus.NOT_FOUND);
}
origin: caskdata/coopr

@Test
public void testAddServices() throws Exception {
 Cluster cluster = createActiveCluster();
 // The required sensitive user field, supplied directly with the request.
 Map<String, Object> secrets = Maps.newHashMap();
 secrets.put("key", "keycontents");
 AddServicesRequest request = new AddServicesRequest(secrets, ImmutableSet.of(service2.getName()));
 clusterService.requestAddServices(cluster.getId(), account, request);
 // Snapshot the provider's provisioner fields before reloading the cluster.
 Map<String, Object> expectedProvisionerFields = cluster.getProvider().getProvisionerFields();
 cluster = clusterStore.getCluster(cluster.getId());
 // The persisted cluster's provider fields must be unchanged by the request...
 Assert.assertEquals(expectedProvisionerFields, cluster.getProvider().getProvisionerFields());
 // ...while the sensitive fields end up in the credential store instead.
 Assert.assertEquals(secrets, credentialStore.get(account.getTenantId(), cluster.getId()));
}
origin: caskdata/coopr

@Test
public void testSensitiveUserFields() throws Exception {
 Map<String, Object> secrets = Maps.newHashMap();
 secrets.put("key", "keycontents");
 AddServicesRequest addRequest = new AddServicesRequest(secrets, ImmutableSet.of(service2.getName()));
 ClusterOperationRequest opRequest = new ClusterOperationRequest(secrets);

 // Sensitive fields are stored when adding services.
 Cluster cluster = createActiveCluster();
 clusterService.requestAddServices(cluster.getId(), account, addRequest);
 testSensitiveFieldsAdded(cluster, secrets);
 clusterStore.deleteCluster(cluster.getId());

 // Sensitive fields are stored when deleting a cluster.
 cluster = createActiveCluster();
 clusterService.requestClusterDelete(cluster.getId(), account, opRequest);
 testSensitiveFieldsAdded(cluster, secrets);
 clusterStore.deleteCluster(cluster.getId());

 // Sensitive fields are stored when restarting services.
 cluster = createActiveCluster();
 clusterService.requestServiceRuntimeAction(cluster.getId(), account, ClusterAction.RESTART_SERVICES,
                       service1.getName(), opRequest);
 testSensitiveFieldsAdded(cluster, secrets);
 clusterStore.deleteCluster(cluster.getId());
}
co.cask.coopr.http.request.AddServicesRequest

Javadoc

Request to add services to a cluster.

Most used methods

  • <init>
    Create a request to add services to a cluster.
  • getServices
    Get the services to add to the cluster.

Popular in Java

  • Making http post requests using okhttp
  • getResourceAsStream (ClassLoader)
  • setContentView (Activity)
  • notifyDataSetChanged (ArrayAdapter)
  • GridBagLayout (java.awt)
    The GridBagLayout class is a flexible layout manager that aligns components vertically and horizonta
  • FileReader (java.io)
    A specialized Reader that reads from a file in the file system. All read requests made by calling me
  • PrintWriter (java.io)
    Prints formatted representations of objects to a text-output stream. This class implements all of th
  • BigInteger (java.math)
    Immutable arbitrary-precision integers. All operations behave as if BigIntegers were represented in
  • HttpURLConnection (java.net)
    An URLConnection for HTTP (RFC 2616 [http://tools.ietf.org/html/rfc2616]) used to send and receive d
  • AtomicInteger (java.util.concurrent.atomic)
    An int value that may be updated atomically. See the java.util.concurrent.atomic package specificati
Codota Logo
  • Products

    Search for Java codeSearch for JavaScript codeEnterprise
  • IDE Plugins

    IntelliJ IDEAWebStormAndroid StudioEclipseVisual Studio CodePyCharmSublime TextPhpStormVimAtomGoLandRubyMineEmacsJupyter
  • Company

    About UsContact UsCareers
  • Resources

    FAQBlogCodota Academy Plugin user guide Terms of usePrivacy policyJava Code IndexJavascript Code Index
Get Codota for your IDE now