/**
 * Returns the job's net execution time — the time spent executing in the
 * parallel system, excluding pre-flight steps such as the optimizer —
 * converted to the requested time unit.
 *
 * @param desiredUnit the unit of the <tt>NetRuntime</tt>
 * @return The net execution time in the desired unit.
 */
public long getNetRuntime(TimeUnit desiredUnit) {
    final long runtimeMillis = getNetRuntime();
    return desiredUnit.convert(runtimeMillis, TimeUnit.MILLISECONDS);
}
/**
 * Runs the packaged program on the cluster and reports the outcome.
 *
 * <p>For attached execution this prints the job id, net runtime, and any
 * accumulator results; for detached submission it only logs the job id.
 *
 * @param program     the packaged user program to run
 * @param client      the cluster client used for submission
 * @param parallelism the parallelism to run the program with
 * @throws ProgramMissingJobException if the program never called
 *         {@code ExecutionEnvironment.execute()} (no submission result)
 * @throws ProgramInvocationException if invoking the program fails
 */
protected void executeProgram(PackagedProgram program, ClusterClient<?> client, int parallelism)
        throws ProgramMissingJobException, ProgramInvocationException {
    logAndSysout("Starting execution of program");

    final JobSubmissionResult result = client.run(program, parallelism);

    if (null == result) {
        throw new ProgramMissingJobException("No JobSubmissionResult returned, please make sure you called " +
            "ExecutionEnvironment.execute()");
    }

    if (result.isJobExecutionResult()) {
        logAndSysout("Program execution finished");
        JobExecutionResult execResult = result.getJobExecutionResult();
        System.out.println("Job with JobID " + execResult.getJobID() + " has finished.");
        System.out.println("Job Runtime: " + execResult.getNetRuntime() + " ms");
        Map<String, Object> accumulatorsResult = execResult.getAllAccumulatorResults();
        // isEmpty() expresses the intent more directly than size() > 0
        if (!accumulatorsResult.isEmpty()) {
            System.out.println("Accumulator Results: ");
            System.out.println(AccumulatorHelper.getResultsFormatted(accumulatorsResult));
        }
    } else {
        logAndSysout("Job has been submitted with JobID " + result.getJobID());
    }
}
/**
 * Runs the network-throughput job on the given mini cluster and logs the
 * measured throughput.
 *
 * @param cluster        mini cluster providing the client to submit against
 * @param dataVolumeGb   amount of data to ship, in gigabytes
 * @param useForwarder   whether to insert a forwarding task
 * @param isSlowSender   whether the sender is artificially throttled
 * @param isSlowReceiver whether the receiver is artificially throttled
 * @param parallelism    job parallelism
 * @throws Exception if job submission or execution fails
 */
private void testProgram(
        final MiniClusterWithClientResource cluster,
        final int dataVolumeGb,
        final boolean useForwarder,
        final boolean isSlowSender,
        final boolean isSlowReceiver,
        final int parallelism) throws Exception {
    ClusterClient<?> client = cluster.getClusterClient();
    client.setDetached(false);
    client.setPrintStatusDuringExecution(false);
    JobExecutionResult jer = (JobExecutionResult) client.submitJob(
        createJobGraph(
            dataVolumeGb,
            useForwarder,
            isSlowSender,
            isSlowReceiver,
            parallelism),
        getClass().getClassLoader());

    // Use a long multiplier so large volumes cannot overflow the int multiplication.
    long dataVolumeMbit = dataVolumeGb * 8192L;
    long runtimeSecs = jer.getNetRuntime(TimeUnit.SECONDS);

    // Guard against sub-second runtimes: dividing by zero would yield Infinity,
    // which the int cast would silently report as Integer.MAX_VALUE.
    int mbitPerSecond = runtimeSecs > 0
        ? (int) (((double) dataVolumeMbit) / runtimeSecs)
        : 0;

    LOG.info(String.format("Test finished with throughput of %d MBit/s (runtime [secs]: %d, " +
        "data volume [gb/mbits]: %d/%d)", mbitPerSecond, runtimeSecs, dataVolumeGb, dataVolumeMbit));
}
// Expose the job's net runtime (milliseconds) in the JSON response.
json.writeNumberField("runtime_ms", result.getNetRuntime());
/**
 * Executes the program on the given environment and verifies the result:
 * a non-negative net runtime and an empty (but non-null) accumulator map.
 * Fails the test if the execution throws.
 */
private void executeAndRunAssertions(ExecutionEnvironment env) throws Exception {
    try {
        final JobExecutionResult result = env.execute();
        assertTrue(result.getNetRuntime() >= 0);
        final Map<String, Object> accumulators = result.getAllAccumulatorResults();
        assertNotNull(accumulators);
        assertTrue(accumulators.isEmpty());
    } catch (JobExecutionException e) {
        fail("The program should have succeeded on the second run");
    }
}
/**
 * Builds a minimal two-element job, runs it, and reads the net runtime
 * from the execution result (the value itself is discarded).
 */
public static void main(String[] args) throws Exception {
    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    env.fromElements(1, 2).output(new DiscardingOutputFormat<Integer>());
    env.execute().getNetRuntime();
}
}
// Send the job's net runtime (milliseconds) downstream via the streamer.
long runtime = jer.getNetRuntime(); streamer.sendRecord(runtime);
/**
 * Runs a counting mapper over a generated sequence on the collections
 * environment and checks that the accumulator saw every element and the
 * reported net runtime is non-negative.
 */
@Test
public void testAccumulator() {
    try {
        final int numElements = 100;

        ExecutionEnvironment environment = ExecutionEnvironment.createCollectionsEnvironment();
        environment
            .generateSequence(1, numElements)
            .map(new CountingMapper())
            .output(new DiscardingOutputFormat<Long>());

        JobExecutionResult executionResult = environment.execute();

        assertTrue(executionResult.getNetRuntime() >= 0);
        assertEquals(numElements, (int) executionResult.getAccumulatorResult(ACCUMULATOR_NAME));
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
// Tail of a submission call (the call opens on an earlier line outside this view),
// followed by assertions that the returned JobExecutionResult carries the
// expected job id, net runtime, and accumulator map.
ClassLoader.getSystemClassLoader()); assertThat(jobExecutionResult.getJobID(), equalTo(jobId)); assertThat(jobExecutionResult.getNetRuntime(), equalTo(Long.MAX_VALUE)); assertThat( jobExecutionResult.getAllAccumulatorResults(),
// Negative-test path: reaching this point means the job did NOT fail as expected,
// so fail with a message describing what was returned instead.
String msg = res == null ? "null result" : "result in " + res.getNetRuntime() + " ms"; fail("The program should have failed, but returned " + msg);
/**
 * Gets the net execution time of the job — the time spent executing in the
 * parallel system, without pre-flight steps like the optimizer — expressed
 * in the given unit.
 *
 * @param desiredUnit the unit of the <tt>NetRuntime</tt>
 * @return The net execution time in the desired unit.
 */
public long getNetRuntime(TimeUnit desiredUnit) {
    // The underlying value is tracked in milliseconds; convert on the way out.
    return desiredUnit.convert(getNetRuntime(), TimeUnit.MILLISECONDS);
}
/**
 * Converts the job's net execution time (execution in the parallel system,
 * excluding pre-flight steps such as the optimizer) into the desired unit.
 *
 * @param desiredUnit the unit of the <tt>NetRuntime</tt>
 * @return The net execution time in the desired unit.
 */
public long getNetRuntime(TimeUnit desiredUnit) {
    final TimeUnit sourceUnit = TimeUnit.MILLISECONDS;
    return desiredUnit.convert(getNetRuntime(), sourceUnit);
}
/**
 * Creates (or appends to) the benchmark CSV file.
 *
 * <p>If the file already exists the result row is appended; otherwise a new
 * file is written with a header row followed by the result row.
 *
 * @param env given ExecutionEnvironment
 * @throws IOException exception during file writing
 */
private static void writeCSV(ExecutionEnvironment env) throws IOException {
    String head = String
        .format("%s|%s|%s|%s|%s|%s%n",
            "Parallelism",
            "dataset",
            "vertex-label",
            "edge-label",
            "verification",
            "Runtime(s)");

    String tail = String
        .format("%s|%s|%s|%s|%s|%s%n",
            env.getParallelism(),
            INPUT_PATH,
            VERTEX_LABEL,
            EDGE_LABEL,
            VERIFICATION,
            env.getLastJobExecutionResult().getNetRuntime(TimeUnit.SECONDS));

    File f = new File(CSV_PATH);
    if (f.exists() && !f.isDirectory()) {
        FileUtils.writeStringToFile(f, tail, true);
    } else {
        // try-with-resources ensures the writer is closed even if a write fails
        // (the original leaked the PrintWriter on the exception path).
        try (PrintWriter writer = new PrintWriter(CSV_PATH, "UTF-8")) {
            writer.print(head);
            writer.print(tail);
        }
    }
}
/**
 * Translates the given Beam pipeline to a Flink program, executes it, and
 * wraps the accumulator values and net runtime in a {@link FlinkRunnerResult}.
 *
 * @param pipeline the pipeline to translate and execute
 * @return result holding the final accumulator values and the net runtime
 * @throws RuntimeException if the underlying Flink execution fails
 */
@Override
public FlinkRunnerResult run(Pipeline pipeline) {
    LOG.info("Executing pipeline using FlinkPipelineRunner.");

    LOG.info("Translating pipeline to Flink program.");
    this.flinkJobEnv.translate(pipeline);

    LOG.info("Starting execution of Flink program.");
    JobExecutionResult result;
    try {
        result = this.flinkJobEnv.executePipeline();
    } catch (Exception e) {
        LOG.error("Pipeline execution failed", e);
        throw new RuntimeException("Pipeline execution failed", e);
    }

    LOG.info("Execution finished in {} msecs", result.getNetRuntime());
    Map<String, Object> accumulators = result.getAllAccumulatorResults();
    if (accumulators != null && !accumulators.isEmpty()) {
        LOG.info("Final aggregator values:");
        // Iterate the already-fetched map instead of calling
        // getAllAccumulatorResults() a second time.
        for (Map.Entry<String, Object> entry : accumulators.entrySet()) {
            LOG.info("{} : {}", entry.getKey(), entry.getValue());
        }
    }

    return new FlinkRunnerResult(accumulators, result.getNetRuntime());
}
static PipelineResult createPipelineResult(JobExecutionResult result, PipelineOptions options) { if (result instanceof DetachedEnvironment.DetachedJobExecutionResult) { LOG.info("Pipeline submitted in Detached mode"); // no metricsPusher because metrics are not supported in detached mode return new FlinkDetachedRunnerResult(); } else { LOG.info("Execution finished in {} msecs", result.getNetRuntime()); Map<String, Object> accumulators = result.getAllAccumulatorResults(); if (accumulators != null && !accumulators.isEmpty()) { LOG.info("Final accumulator values:"); for (Map.Entry<String, Object> entry : result.getAllAccumulatorResults().entrySet()) { LOG.info("{} : {}", entry.getKey(), entry.getValue()); } } FlinkRunnerResult flinkRunnerResult = new FlinkRunnerResult(accumulators, result.getNetRuntime()); MetricsPusher metricsPusher = new MetricsPusher( flinkRunnerResult.getMetricsContainerStepMap(), options, flinkRunnerResult); metricsPusher.start(); return flinkRunnerResult; } }
static PipelineResult createPipelineResult(JobExecutionResult result, PipelineOptions options) { if (result instanceof DetachedEnvironment.DetachedJobExecutionResult) { LOG.info("Pipeline submitted in Detached mode"); // no metricsPusher because metrics are not supported in detached mode return new FlinkDetachedRunnerResult(); } else { LOG.info("Execution finished in {} msecs", result.getNetRuntime()); Map<String, Object> accumulators = result.getAllAccumulatorResults(); if (accumulators != null && !accumulators.isEmpty()) { LOG.info("Final accumulator values:"); for (Map.Entry<String, Object> entry : result.getAllAccumulatorResults().entrySet()) { LOG.info("{} : {}", entry.getKey(), entry.getValue()); } } FlinkRunnerResult flinkRunnerResult = new FlinkRunnerResult(accumulators, result.getNetRuntime()); MetricsPusher metricsPusher = new MetricsPusher( flinkRunnerResult.getMetricsContainerStepMap(), options.as(MetricsOptions.class), flinkRunnerResult); metricsPusher.start(); return flinkRunnerResult; } }
/**
 * Returns a string containing information about the benchmark run:
 * input path, parallelism, traverser strategy, query, embedding count,
 * and net runtime — joined with {@code |}.
 *
 * @return benchmark result string
 */
private String getResultString() {
    return String.join("|",
        String.valueOf(inputPath),
        String.valueOf(getExecutionEnvironment().getParallelism()),
        traverserStrategy.name(),
        String.valueOf(query),
        String.valueOf(embeddingCount),
        String.valueOf(getExecutionEnvironment().getLastJobExecutionResult().getNetRuntime()));
}
/**
 * Runs the packaged program on the cluster and reports the outcome.
 *
 * <p>For attached execution this prints the job id, net runtime, and any
 * accumulator results; for detached submission it only logs the job id.
 *
 * @param program     the packaged user program to run
 * @param client      the cluster client used for submission
 * @param parallelism the parallelism to run the program with
 * @throws ProgramMissingJobException if the program never called
 *         {@code ExecutionEnvironment.execute()} (no submission result)
 * @throws ProgramInvocationException if invoking the program fails
 */
protected void executeProgram(PackagedProgram program, ClusterClient<?> client, int parallelism)
        throws ProgramMissingJobException, ProgramInvocationException {
    logAndSysout("Starting execution of program");

    final JobSubmissionResult result = client.run(program, parallelism);

    if (null == result) {
        throw new ProgramMissingJobException("No JobSubmissionResult returned, please make sure you called " +
            "ExecutionEnvironment.execute()");
    }

    if (result.isJobExecutionResult()) {
        logAndSysout("Program execution finished");
        JobExecutionResult execResult = result.getJobExecutionResult();
        System.out.println("Job with JobID " + execResult.getJobID() + " has finished.");
        System.out.println("Job Runtime: " + execResult.getNetRuntime() + " ms");
        Map<String, Object> accumulatorsResult = execResult.getAllAccumulatorResults();
        // isEmpty() expresses the intent more directly than size() > 0
        if (!accumulatorsResult.isEmpty()) {
            System.out.println("Accumulator Results: ");
            System.out.println(AccumulatorHelper.getResultsFormatted(accumulatorsResult));
        }
    } else {
        logAndSysout("Job has been submitted with JobID " + result.getJobID());
    }
}
/**
 * Runs the packaged program on the cluster and reports the outcome.
 *
 * <p>For attached execution this prints the job id, net runtime, and any
 * accumulator results; for detached submission it only logs the job id.
 *
 * @param program     the packaged user program to run
 * @param client      the cluster client used for submission
 * @param parallelism the parallelism to run the program with
 * @throws ProgramMissingJobException if the program never called
 *         {@code ExecutionEnvironment.execute()} (no submission result)
 * @throws ProgramInvocationException if invoking the program fails
 */
protected void executeProgram(PackagedProgram program, ClusterClient<?> client, int parallelism)
        throws ProgramMissingJobException, ProgramInvocationException {
    logAndSysout("Starting execution of program");

    final JobSubmissionResult result = client.run(program, parallelism);

    if (null == result) {
        throw new ProgramMissingJobException("No JobSubmissionResult returned, please make sure you called " +
            "ExecutionEnvironment.execute()");
    }

    if (result.isJobExecutionResult()) {
        logAndSysout("Program execution finished");
        JobExecutionResult execResult = result.getJobExecutionResult();
        System.out.println("Job with JobID " + execResult.getJobID() + " has finished.");
        System.out.println("Job Runtime: " + execResult.getNetRuntime() + " ms");
        Map<String, Object> accumulatorsResult = execResult.getAllAccumulatorResults();
        // isEmpty() expresses the intent more directly than size() > 0
        if (!accumulatorsResult.isEmpty()) {
            System.out.println("Accumulator Results: ");
            System.out.println(AccumulatorHelper.getResultsFormatted(accumulatorsResult));
        }
    } else {
        logAndSysout("Job has been submitted with JobID " + result.getJobID());
    }
}
public CentralizedWeightedMatching() throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironment(); // Source: http://grouplens.org/datasets/movielens/ @SuppressWarnings("serial") DataStream<Edge<Long, Long>> edges = env .readTextFile("movielens_10k_sorted.txt") .map(new MapFunction<String, Edge<Long, Long>>() { @Override public Edge<Long, Long> map(String s) throws Exception { String[] args = s.split("\t"); long src = Long.parseLong(args[0]); long trg = Long.parseLong(args[1]) + 1000000; long val = Long.parseLong(args[2]) * 10; return new Edge<>(src, trg, val); } }); GraphStream<Long, NullValue, Long> graph = new SimpleEdgeStream<>(edges, env); graph.getEdges() .flatMap(new WeightedMatchingFlatMapper()).setParallelism(1) .print().setParallelism(1); JobExecutionResult res = env.execute("Distributed Merge Tree Sandbox"); long runtime = res.getNetRuntime(); System.out.println("Runtime: " + runtime); }