@Test
public void testTriggerSavepointFailureIllegalJobID() throws Exception {
	replaceStdOutAndStdErr();
	try {
		final CliFrontend frontend = new MockedCliFrontend(new StandaloneClusterClient(
			getConfiguration(),
			new TestingHighAvailabilityServices(),
			false));

		final String[] args = { "invalid job id" };
		try {
			frontend.savepoint(args);
			fail("Should have failed.");
		} catch (CliArgsException e) {
			// an unparsable JobID must be rejected as a CLI argument error
			assertThat(e.getMessage(), Matchers.containsString("Cannot parse JobID"));
		}
	} finally {
		restoreStdOutAndStdErr();
	}
}
@Override
public GetClusterStatusResponse getClusterStatus() {
	try {
		final ActorGateway jobManager = getJobManagerGateway();
		final Future<Object> responseFuture = jobManager.ask(GetClusterStatus.getInstance(), timeout);
		final Object response = Await.result(responseFuture, timeout);
		if (!(response instanceof GetClusterStatusResponse)) {
			// an unexpected reply type is wrapped by the outer catch below
			throw new RuntimeException("Received the wrong reply " + response + " from cluster.");
		}
		return (GetClusterStatusResponse) response;
	} catch (Exception e) {
		throw new RuntimeException("Couldn't retrieve the cluster status.", e);
	}
}
@Override
public String getWebInterfaceURL() {
	final InetSocketAddress jobManagerAddress;
	try {
		jobManagerAddress = AkkaUtils.getInetSocketAddressFromAkkaURL(
			getClusterConnectionInfo().getAddress());
	} catch (Exception e) {
		throw new RuntimeException("Could not retrieve leader retrieval information.", e);
	}
	// host comes from the JobManager's Akka URL, port from the web options
	final int webPort = getFlinkConfiguration().getInteger(WebOptions.PORT);
	return "http://" + jobManagerAddress.getHostName() + ":" + webPort;
}
/** * FLINK-6641 * * <p>Tests that the {@link ClusterClient} does not clean up HA data when being shut down. */ @Test public void testClusterClientShutdown() throws Exception { Configuration config = new Configuration(); HighAvailabilityServices highAvailabilityServices = mock(HighAvailabilityServices.class); StandaloneClusterClient clusterClient = new StandaloneClusterClient(config, highAvailabilityServices, false); clusterClient.shutdown(); // check that the client does not clean up HA data but closes the services verify(highAvailabilityServices, never()).closeAndCleanupAllData(); verify(highAvailabilityServices).close(); }
/**
 * This test verifies correct job submission messaging logic and plan translation calls.
 */
@Test
public void shouldSubmitToJobClient() throws Exception {
	jobManagerSystem.actorOf(
		Props.create(SuccessReturningActor.class),
		JobMaster.JOB_MANAGER_NAME);

	final StandaloneClusterClient client = new StandaloneClusterClient(config);
	client.setDetached(true);

	final JobSubmissionResult submissionResult = client.run(program.getPlanWithJars(), 1);
	assertNotNull(submissionResult);

	program.deleteExtractedLibraries();
}
// Create a standalone client (third argument true — NOTE(review): presumably the
// "clean up HA data on shutdown" flag, mirroring the boolean in the other
// StandaloneClusterClient constructions here; confirm against the constructor)
// and fetch an actor gateway to the leading JobManager.
StandaloneClusterClient client = new StandaloneClusterClient(configuration, highAvailabilityServices, true); ActorGateway gateway = client.getJobManagerGateway();
private static void testFailureBehavior(final InetSocketAddress unreachableEndpoint) throws Exception { final Configuration config = new Configuration(); config.setString(AkkaOptions.ASK_TIMEOUT, ASK_STARTUP_TIMEOUT + " ms"); config.setString(AkkaOptions.LOOKUP_TIMEOUT, CONNECT_TIMEOUT + " ms"); config.setString(JobManagerOptions.ADDRESS, unreachableEndpoint.getHostName()); config.setInteger(JobManagerOptions.PORT, unreachableEndpoint.getPort()); StandaloneClusterClient client = new StandaloneClusterClient(config); try { // we have to query the cluster status to start the connection attempts client.getClusterStatus(); fail("This should fail with an exception since the endpoint is unreachable."); } catch (Exception e) { // check that we have failed with a LeaderRetrievalException which says that we could // not connect to the leading JobManager assertTrue(CommonTestUtils.containsCause(e, LeaderRetrievalException.class)); } }
/**
 * Kills the Storm topology with the given name, optionally sleeping for the
 * configured number of seconds before issuing the stop.
 *
 * @param name name of the topology to kill
 * @param options kill options; when non-null, {@code get_wait_secs()} seconds are
 *     slept before the job is stopped
 * @throws NotAliveException if no running topology with the given name is found
 */
public void killTopologyWithOpts(final String name, final KillOptions options) throws NotAliveException {
	final JobID jobId = this.getTopologyJobId(name);
	if (jobId == null) {
		throw new NotAliveException("Storm topology with name " + name + " not found.");
	}
	if (options != null) {
		try {
			// 1000L avoids int overflow for very large wait_secs values
			Thread.sleep(1000L * options.get_wait_secs());
		} catch (final InterruptedException e) {
			// restore the interrupt status so callers up the stack can observe it
			Thread.currentThread().interrupt();
			throw new RuntimeException(e);
		}
	}
	final Configuration configuration = GlobalConfiguration.loadConfiguration();
	configuration.setString(JobManagerOptions.ADDRESS, this.jobManagerHost);
	configuration.setInteger(JobManagerOptions.PORT, this.jobManagerPort);
	final StandaloneClusterClient client;
	try {
		client = new StandaloneClusterClient(configuration);
	} catch (final Exception e) {
		throw new RuntimeException("Could not establish a connection to the job manager", e);
	}
	try {
		client.stop(jobId);
	} catch (final Exception e) {
		throw new RuntimeException("Cannot stop job.", e);
	}
}
// NOTE(review): this span appears to be extraction residue — it fuses the tail of a
// client-construction try/catch with the tail of a detached-submission try/catch
// (an orphaned "Collections.<URL>emptyList(), ..." argument list sits between them)
// and is not syntactically complete on its own; verify against the original file.
client = new StandaloneClusterClient(configuration); } catch (final Exception e) { throw new RuntimeException("Could not establish a connection to the job manager", e); Collections.<URL>emptyList(), this.getClass().getClassLoader()); client.runDetached(jobGraph, classLoader); } catch (final ProgramInvocationException e) { throw new RuntimeException("Cannot execute job due to ProgramInvocationException", e);
@Override
public JobSubmissionResult submitJob(JobGraph jobGraph, ClassLoader classLoader) throws ProgramInvocationException {
	// detached submission fires and forgets; attached submission blocks for the result
	return isDetached()
		? super.runDetached(jobGraph, classLoader)
		: super.run(jobGraph, classLoader);
}
}
@Override
public String getWebInterfaceURL() {
	// web frontend runs on the JobManager host at the configured (or default) web port
	final String webHost = getJobManagerAddress().getHostString();
	final int webPort = getFlinkConfiguration().getInteger(
		ConfigConstants.JOB_MANAGER_WEB_PORT_KEY,
		ConfigConstants.DEFAULT_JOB_MANAGER_WEB_FRONTEND_PORT);
	return "http://" + webHost + ":" + webPort;
}
/** * This test verifies correct that the correct exception is thrown when the job submission fails. */ @Test public void shouldSubmitToJobClientFails() throws Exception { jobManagerSystem.actorOf( Props.create(FailureReturningActor.class), JobMaster.JOB_MANAGER_NAME); StandaloneClusterClient out = new StandaloneClusterClient(config); out.setDetached(true); try { out.run(program.getPlanWithJars(), 1); fail("This should fail with an exception"); } catch (ProgramInvocationException e) { // bam! } catch (Exception e) { fail("wrong exception " + e); } }
/**
 * Submits the job, either detached or blocking.
 *
 * <p>NOTE(review): the original condition was {@code isDetached() || isDetached()} — a
 * duplicated call that left the {@code detached} parameter unused. It now also honors
 * the explicit flag, which appears to be the intent; confirm against the callers.
 *
 * @param jobGraph the job to submit
 * @param classLoader class loader used to resolve user code
 * @param detached forces detached submission even when the client itself is attached
 * @return the submission result (detached) or the final job result (blocking)
 * @throws ProgramInvocationException if the submission fails
 */
@Override
public JobSubmissionResult submitJob(JobGraph jobGraph, ClassLoader classLoader, boolean detached) throws ProgramInvocationException {
	if (isDetached() || detached) {
		return super.runDetached(jobGraph, classLoader);
	} else {
		return super.run(jobGraph, classLoader);
	}
}
}
@Override
public StandaloneClusterClient retrieve(String applicationID) {
	// a standalone cluster is "retrieved" by simply connecting to it
	final StandaloneClusterClient retrievedClient;
	try {
		retrievedClient = new StandaloneClusterClient(config);
	} catch (Exception e) {
		throw new RuntimeException("Couldn't retrieve standalone cluster", e);
	}
	return retrievedClient;
}
// NOTE(review): fragment of a larger test — the surrounding try/catch is not visible
// here. It submits a mocked program in detached mode and expects run(...) to throw
// before fail(...) is reached; verify the enclosing block in the original file.
StandaloneClusterClient client = new StandaloneClusterClient(config); client.setDetached(true); client.run(packagedProgramMock, 1); fail("Creating the local execution environment should not be possible");
@Override
public String getWebInterfaceURL() {
	InetSocketAddress leaderAddress;
	try {
		// the JobManager's host is derived from its Akka connection URL
		leaderAddress = AkkaUtils.getInetSocketAddressFromAkkaURL(
			getClusterConnectionInfo().getAddress());
	} catch (Exception e) {
		throw new RuntimeException("Could not retrieve leader retrieval information.", e);
	}
	return "http://" + leaderAddress.getHostName() + ":"
		+ getFlinkConfiguration().getInteger(WebOptions.PORT);
}
@Override
public GetClusterStatusResponse getClusterStatus() {
	try {
		final ActorGateway gateway = getJobManagerGateway();
		final Object reply = Await.result(
			gateway.ask(GetClusterStatus.getInstance(), timeout),
			timeout);
		if (reply instanceof GetClusterStatusResponse) {
			return (GetClusterStatusResponse) reply;
		}
		// wrapped by the outer catch, like any other failure
		throw new RuntimeException("Received the wrong reply " + reply + " from cluster.");
	} catch (Exception e) {
		throw new RuntimeException("Couldn't retrieve the cluster status.", e);
	}
}
@Override
public JobSubmissionResult submitJob(JobGraph jobGraph, ClassLoader classLoader) throws ProgramInvocationException {
	// fire-and-forget when detached, otherwise block until the job finishes
	if (!isDetached()) {
		return super.run(jobGraph, classLoader);
	}
	return super.runDetached(jobGraph, classLoader);
}
}
@Override
public StandaloneClusterClient retrieve(StandaloneClusterId standaloneClusterId) throws ClusterRetrieveException {
	// "retrieving" a standalone cluster just means connecting a client to it
	final StandaloneClusterClient retrievedClient;
	try {
		retrievedClient = new StandaloneClusterClient(config);
	} catch (Exception e) {
		throw new ClusterRetrieveException("Couldn't retrieve standalone cluster", e);
	}
	return retrievedClient;
}
// NOTE(review): this span looks like duplicated extraction residue — the same
// "run(prg, 1); fail(FAIL_MESSAGE); catch (ProgramInvocationException e)" sequence is
// repeated six times and the final catch is unclosed; the original is presumably a
// single try { out.run(prg, 1); fail(...); } catch (ProgramInvocationException e)
// block. Verify against the original file before relying on this text.
Props.create(SuccessReturningActor.class), JobMaster.JOB_MANAGER_NAME); StandaloneClusterClient out = new StandaloneClusterClient(config); out.setDetached(true); out.run(prg, 1); fail(FAIL_MESSAGE); } catch (ProgramInvocationException e) { out.run(prg, 1); fail(FAIL_MESSAGE); } catch (ProgramInvocationException e) { out.run(prg, 1); fail(FAIL_MESSAGE); } catch (ProgramInvocationException e) { out.run(prg, 1); fail(FAIL_MESSAGE); } catch (ProgramInvocationException e) { out.run(prg, 1); fail(FAIL_MESSAGE); } catch (ProgramInvocationException e) { out.run(prg, 1); fail(FAIL_MESSAGE); } catch (ProgramInvocationException e) {