@Override
public void run() {
    try {
        // Submit attached so a job failure surfaces as an exception here.
        client.setDetached(false);
        client.submitJob(jobGraph, KafkaConsumerTestBase.class.getClassLoader());
    } catch (Throwable t) {
        // Log for diagnostics and hand the failure to the main test thread.
        LOG.error("Job Runner failed with exception", t);
        error.set(t);
    }
}
};
@Override
public void run() {
    try {
        // Submit attached so a job failure surfaces as an exception here.
        client.setDetached(false);
        client.submitJob(jobGraph, KafkaConsumerTestBase.class.getClassLoader());
    } catch (Throwable t) {
        // Hand any failure to the main test thread for later inspection.
        jobError.set(t);
    }
}
};
@Override
public void run() {
    try {
        // Submit attached so a job failure surfaces as an exception here.
        client.setDetached(false);
        client.submitJob(jobGraph, KafkaConsumerTestBase.class.getClassLoader());
    } catch (Throwable t) {
        // Cancellation is the expected way this job terminates; record
        // only failures that are NOT caused by a job cancellation.
        final boolean isCancellation =
                ExceptionUtils.findThrowable(t, JobCancellationException.class).isPresent();
        if (!isCancellation) {
            LOG.warn("Got exception during execution", t);
            error.f0 = t;
        }
    }
}
};
@Override
public void run() {
    try {
        // Submit attached so a job failure surfaces as an exception here.
        client.setDetached(false);
        client.submitJob(jobGraph, KafkaConsumerTestBase.class.getClassLoader());
    } catch (Throwable t) {
        // Ignore cancellation -- it is how this job is expected to end;
        // everything else is handed to the main test thread.
        final boolean isCancellation =
                ExceptionUtils.findThrowable(t, JobCancellationException.class).isPresent();
        if (!isCancellation) {
            error.set(t);
        }
    }
}
});
/**
 * Submits {@code jobGraph} (expected to fail submission), checks the resulting
 * exception against {@code failurePredicate}, and then verifies the cluster is
 * still healthy by submitting a known-good job.
 *
 * @param jobGraph the job whose submission must fail
 * @param failurePredicate accepts the expected submission exception; any
 *     non-matching exception is rethrown
 * @throws org.apache.flink.client.program.ProgramInvocationException if the
 *     failure does not match the predicate, or the follow-up healthy job fails
 */
private void runJobSubmissionTest(JobGraph jobGraph, Predicate<Exception> failurePredicate)
        throws org.apache.flink.client.program.ProgramInvocationException {
    final ClusterClient<?> clusterClient = MINI_CLUSTER_RESOURCE.getClusterClient();
    final ClassLoader classLoader = JobSubmissionFailsITCase.class.getClassLoader();

    clusterClient.setDetached(detached);
    try {
        clusterClient.submitJob(jobGraph, classLoader);
        fail("Job submission should have thrown an exception.");
    } catch (Exception e) {
        // Rethrow anything the test did not anticipate (precise rethrow keeps
        // the declared checked exception type valid).
        if (!failurePredicate.test(e)) {
            throw e;
        }
    }

    // Sanity check: the cluster must still accept a valid job afterwards.
    clusterClient.setDetached(false);
    clusterClient.submitJob(getWorkingJobGraph(), classLoader);
}
// NOTE(review): this fragment does not parse -- the attached submitJob(...) call is not
// terminated with ';', and its JobExecutionResult ('jobResult') is never used before the
// detached re-submission that follows. This looks like two alternative submission paths
// (attached vs. detached) merged into one line; confirm which one is intended and remove
// the other.
clusterClient.setDetached(false); final JobExecutionResult jobResult = clusterClient .submitJob(jobGraph, context.getClassLoader()) clusterClient.setDetached(true); clusterClient.submitJob(jobGraph, context.getClassLoader());
/**
 * Runs the network-throughput job on the given mini cluster and logs the
 * measured throughput in MBit/s.
 *
 * @param cluster mini cluster providing the client used for submission
 * @param dataVolumeGb data volume to push through the network, in gigabytes
 * @param useForwarder whether to insert a forwarding task between sender and receiver
 * @param isSlowSender whether the sender should be artificially slowed down
 * @param isSlowReceiver whether the receiver should be artificially slowed down
 * @param parallelism parallelism of the generated job
 * @throws Exception if job submission or execution fails
 */
private void testProgram(
        final MiniClusterWithClientResource cluster,
        final int dataVolumeGb,
        final boolean useForwarder,
        final boolean isSlowSender,
        final boolean isSlowReceiver,
        final int parallelism) throws Exception {
    ClusterClient<?> client = cluster.getClusterClient();
    // Submit attached so we get the JobExecutionResult (and its runtime) back.
    client.setDetached(false);
    client.setPrintStatusDuringExecution(false);

    JobExecutionResult jer = (JobExecutionResult) client.submitJob(
            createJobGraph(
                    dataVolumeGb,
                    useForwarder,
                    isSlowSender,
                    isSlowReceiver,
                    parallelism),
            getClass().getClassLoader());

    // 1 GB == 8 * 1024 Mbit. Use a long literal so the multiplication is done in
    // long arithmetic instead of overflowing int before the widening assignment.
    long dataVolumeMbit = dataVolumeGb * 8192L;
    long runtimeSecs = jer.getNetRuntime(TimeUnit.SECONDS);

    // Clamp the divisor to 1 so a sub-second runtime does not produce a
    // nonsensical Integer.MAX_VALUE throughput via division by zero.
    int mbitPerSecond = (int) (((double) dataVolumeMbit) / Math.max(runtimeSecs, 1));

    LOG.info(String.format("Test finished with throughput of %d MBit/s (runtime [secs]: %d, "
            + "data volume [gb/mbits]: %d/%d)", mbitPerSecond, runtimeSecs, dataVolumeGb, dataVolumeMbit));
}
@Test public void testMigrationAndRestore() throws Throwable { ClassLoader classLoader = this.getClass().getClassLoader(); ClusterClient<?> clusterClient = MINI_CLUSTER_RESOURCE.getClusterClient(); clusterClient.setDetached(true); final Deadline deadline = Deadline.now().plus(TEST_TIMEOUT); // submit job with old version savepoint and create a migrated savepoint in the new version String savepointPath = migrateJob(classLoader, clusterClient, deadline); // restore from migrated new version savepoint restoreJob(classLoader, clusterClient, deadline, savepointPath); }
// Switch the client to detached mode for the following submission
// (submitJob is then not expected to wait for job completion).
client.setDetached(true);
// Submit the original job detached, then the rescaled job attached so that the
// second submission only returns once the rescaled job has finished.
client.setDetached(true); client.submitJob(jobGraph, RescalingITCase.class.getClassLoader()); client.setDetached(false); client.submitJob(scaledJobGraph, RescalingITCase.class.getClassLoader()); } catch (JobExecutionException exception) {
/**
 * Starts a mini cluster, submits the stateful-counter job detached, waits until
 * it has made progress, and triggers a savepoint.
 *
 * @param clusterFactory factory for the mini cluster to run the job on
 * @param parallelism parallelism of the generated job
 * @return the path of the triggered savepoint
 * @throws Exception if cluster startup, submission, or the savepoint fails
 */
private String submitJobAndTakeSavepoint(MiniClusterResourceFactory clusterFactory, int parallelism) throws Exception {
    final JobGraph jobGraph = createJobGraph(parallelism, 0, 1000);
    final JobID jobId = jobGraph.getJobID();
    StatefulCounter.resetForTest(parallelism);

    final MiniClusterWithClientResource cluster = clusterFactory.get();
    cluster.before();
    final ClusterClient<?> client = cluster.getClusterClient();

    try {
        client.setDetached(true);
        client.submitJob(jobGraph, SavepointITCase.class.getClassLoader());

        // Wait until the job has actually processed data before snapshotting.
        StatefulCounter.getProgressLatch().await();

        return client.triggerSavepoint(jobId, null).get();
    } finally {
        // Always tear the cluster down and reset the shared counter state.
        cluster.after();
        StatefulCounter.resetForTest(parallelism);
    }
}
protected void runAndCancelJob(Plan plan, final int msecsTillCanceling, int maxTimeTillCanceled) throws Exception { // submit job final JobGraph jobGraph = getJobGraph(plan); ClusterClient<?> client = CLUSTER.getClusterClient(); client.setDetached(true); JobSubmissionResult jobSubmissionResult = client.submitJob(jobGraph, CancelingTestBase.class.getClassLoader()); Deadline submissionDeadLine = new FiniteDuration(2, TimeUnit.MINUTES).fromNow(); JobStatus jobStatus = client.getJobStatus(jobSubmissionResult.getJobID()).get(GET_FUTURE_TIMEOUT, TimeUnit.MILLISECONDS); while (jobStatus != JobStatus.RUNNING && submissionDeadLine.hasTimeLeft()) { Thread.sleep(50); jobStatus = client.getJobStatus(jobSubmissionResult.getJobID()).get(GET_FUTURE_TIMEOUT, TimeUnit.MILLISECONDS); } if (jobStatus != JobStatus.RUNNING) { Assert.fail("Job not in state RUNNING."); } Thread.sleep(msecsTillCanceling); client.cancel(jobSubmissionResult.getJobID()); Deadline cancelDeadline = new FiniteDuration(maxTimeTillCanceled, TimeUnit.MILLISECONDS).fromNow(); JobStatus jobStatusAfterCancel = client.getJobStatus(jobSubmissionResult.getJobID()).get(GET_FUTURE_TIMEOUT, TimeUnit.MILLISECONDS); while (jobStatusAfterCancel != JobStatus.CANCELED && cancelDeadline.hasTimeLeft()) { Thread.sleep(50); jobStatusAfterCancel = client.getJobStatus(jobSubmissionResult.getJobID()).get(GET_FUTURE_TIMEOUT, TimeUnit.MILLISECONDS); } if (jobStatusAfterCancel != JobStatus.CANCELED) { Assert.fail("Failed to cancel job with ID " + jobSubmissionResult.getJobID() + '.'); } }
// Detached submission: the test continues immediately after the job is accepted.
client.setDetached(true); client.submitJob(jobGraph, SavepointITCase.class.getClassLoader());
// Attached submission: any execution failure is raised here and caught below.
client.setDetached(false); client.submitJob(jobGraph, SavepointITCase.class.getClassLoader()); } catch (Exception e) {
// Detached submission: the test interacts with the running job afterwards.
clusterClient.setDetached(true); clusterClient.submitJob(jobGraph, WebFrontendITCase.class.getClassLoader());
// Switch the client to detached mode for the following submission.
client.setDetached(true);
// Detached submission of the savepoint test graph.
client.setDetached(true); client.submitJob(graph, SavepointITCase.class.getClassLoader());
// Submit the auto-cancellable job detached; queryable-state lookups follow.
final JobGraph jobGraph = autoCancellableJob.getJobGraph(); clusterClient.setDetached(true); clusterClient.submitJob(jobGraph, AbstractQueryableStateTestBase.class.getClassLoader());
// Submit the auto-cancellable job detached; queryable-state lookups follow.
final JobGraph jobGraph = autoCancellableJob.getJobGraph(); clusterClient.setDetached(true); clusterClient.submitJob(jobGraph, AbstractQueryableStateTestBase.class.getClassLoader());
// (tail of a call ending in 'valueState') Detached submission of the queryable-state job.
valueState); clusterClient.setDetached(true); clusterClient.submitJob(jobGraph, AbstractQueryableStateTestBase.class.getClassLoader());