/**
 * Stops the Spark Streaming context gracefully, if one is running.
 * Safe to call more than once; subsequent calls are no-ops.
 */
@Override
public synchronized void close() {
  if (streamingContext == null) {
    return; // already closed (or never started)
  }
  log.info("Shutting down Spark Streaming; this may take some time");
  // stop(true, true): also stop the underlying SparkContext, and stop gracefully
  streamingContext.stop(true, true);
  streamingContext = null;
}
/**
 * Releases all held resources in dependency order: the model manager first,
 * then the Kafka consumer, and finally the Spark Streaming context.
 * Each field is nulled after shutdown so repeated calls are harmless.
 */
@Override
public synchronized void close() {
  if (modelManager != null) {
    log.info("Shutting down model manager");
    modelManager.close();
    modelManager = null;
  }
  if (consumerIterator != null) {
    log.info("Shutting down consumer");
    consumerIterator.close();
    consumerIterator = null;
  }
  if (streamingContext != null) {
    log.info("Shutting down Spark Streaming; this may take some time");
    // stop(true, true): also stop the SparkContext, and stop gracefully
    streamingContext.stop(true, true);
    streamingContext = null;
  }
}
public static void main(String[] args) throws Exception { String zkQuorum = args[0]; String group = args[1]; SparkConf conf = new SparkConf().setAppName("KafkaInput"); // Create a StreamingContext with a 1 second batch size JavaStreamingContext jssc = new JavaStreamingContext(conf, new Duration(1000)); Map<String, Integer> topics = new HashMap<String, Integer>(); topics.put("pandas", 1); JavaPairDStream<String, String> input = KafkaUtils.createStream(jssc, zkQuorum, group, topics); input.print(); // start our streaming context and wait for it to "finish" jssc.start(); // Wait for 10 seconds then exit. To run forever call without a timeout jssc.awaitTermination(10000); // Stop the streaming context jssc.stop(); } }
public static void main(String[] args) throws Exception { String master = args[0]; JavaSparkContext sc = new JavaSparkContext(master, "StreamingLogInput"); // Create a StreamingContext with a 1 second batch size JavaStreamingContext jssc = new JavaStreamingContext(sc, new Duration(1000)); // Create a DStream from all the input on port 7777 JavaDStream<String> lines = jssc.socketTextStream("localhost", 7777); // Filter our DStream for lines with "error" JavaDStream<String> errorLines = lines.filter(new Function<String, Boolean>() { public Boolean call(String line) { return line.contains("error"); }}); // Print out the lines with errors, which causes this DStream to be evaluated errorLines.print(); // start our streaming context and wait for it to "finish" jssc.start(); // Wait for 10 seconds then exit. To run forever call without a timeout jssc.awaitTermination(10000); // Stop the streaming context jssc.stop(); } }
/**
 * Shuts down the Spark Streaming context if it is still active.
 * Idempotent: once closed, further invocations do nothing.
 */
@Override
public synchronized void close() {
  JavaStreamingContext context = streamingContext;
  if (context != null) {
    log.info("Shutting down Spark Streaming; this may take some time");
    // Graceful stop that also tears down the underlying SparkContext.
    context.stop(true, true);
    streamingContext = null;
  }
}
/** Stops the streaming context after each test; skips cleanly if setup never created it. */
@After
public void tearDown() {
  // Guard against NPE when setUp failed before initializing ssc,
  // matching the guarded teardown pattern used elsewhere in this codebase.
  if (ssc != null) {
    ssc.stop();
    ssc = null;
  }
}
/** Stops the streaming context after each test; skips cleanly if setup never created it. */
@After
public void tearDown() {
  // Guard against NPE when setUp failed before initializing ssc,
  // matching the guarded teardown pattern used elsewhere in this codebase.
  if (ssc != null) {
    ssc.stop();
    ssc = null;
  }
}
}
/** Stops the streaming context after each test; skips cleanly if setup never created it. */
@After
public void tearDown() {
  // Null guard prevents a teardown NPE from masking the original test failure.
  if (ssc != null) {
    ssc.stop();
    ssc = null;
  }
}
/** Stops the streaming context after each test; skips cleanly if setup never created it. */
@After
public void tearDown() {
  // Null guard prevents a teardown NPE from masking the original test failure.
  if (ssc != null) {
    ssc.stop();
    ssc = null;
  }
}
/** Stops the streaming context after each test; skips cleanly if setup never created it. */
@After
public void tearDown() {
  // Null guard prevents a teardown NPE from masking the original test failure.
  if (ssc != null) {
    ssc.stop();
    ssc = null;
  }
}
/** Stops the streaming context after each test; skips cleanly if setup never created it. */
@After
public void tearDown() {
  // Null guard prevents a teardown NPE from masking the original test failure.
  if (ssc != null) {
    ssc.stop();
    ssc = null;
  }
}
/** Stops the streaming context after each test; skips cleanly if setup never created it. */
@After
public void tearDown() {
  // Null guard prevents a teardown NPE from masking the original test failure.
  if (ssc != null) {
    ssc.stop();
    ssc = null;
  }
}
/** Stops the streaming context after each test; skips cleanly if setup never created it. */
@After
public void tearDown() {
  // Null guard prevents a teardown NPE from masking the original test failure.
  if (ssc != null) {
    ssc.stop();
    ssc = null;
  }
}
/** Stops the streaming context after each test; skips cleanly if setup never created it. */
@After
public void tearDown() {
  // Null guard prevents a teardown NPE from masking the original test failure.
  if (ssc != null) {
    ssc.stop();
    ssc = null;
  }
}
/**
 * Stops the session and streaming context after each test.
 * Null guards keep a failed setUp from triggering a teardown NPE, and both
 * fields are nulled (the original left {@code ssc} set) so state cannot
 * leak between tests. Stop order (spark, then ssc) is preserved.
 */
@After
public void tearDown() {
  if (spark != null) {
    spark.stop();
    spark = null;
  }
  if (ssc != null) {
    ssc.stop();
    ssc = null;
  }
}
/**
 * Stops the session and streaming context after each test.
 * Null guards keep a failed setUp from triggering a teardown NPE, and both
 * fields are nulled (the original left {@code ssc} set) so state cannot
 * leak between tests. Stop order (spark, then ssc) is preserved.
 */
@After
public void tearDown() {
  if (spark != null) {
    spark.stop();
    spark = null;
  }
  if (ssc != null) {
    ssc.stop();
    ssc = null;
  }
}
/**
 * Stops the session and streaming context after each test.
 * Null guards keep a failed setUp from triggering a teardown NPE, and both
 * fields are nulled (the original left {@code ssc} set) so state cannot
 * leak between tests. Stop order (spark, then ssc) is preserved.
 */
@After
public void tearDown() {
  if (spark != null) {
    spark.stop();
    spark = null;
  }
  if (ssc != null) {
    ssc.stop();
    ssc = null;
  }
}
/**
 * Per-test cleanup: stop the streaming context first, then tear down the
 * Kafka test harness. Each reference is cleared once released so a failed
 * setUp or repeated teardown cannot double-free anything.
 */
@After
public void tearDown() {
  if (ssc != null) {
    ssc.stop();
    ssc = null;
  }
  if (kafkaTestUtils != null) {
    kafkaTestUtils.teardown();
    kafkaTestUtils = null;
  }
}
/**
 * Per-test cleanup: stop the streaming context first, then tear down the
 * Kafka test harness. Each reference is cleared once released so a failed
 * setUp or repeated teardown cannot double-free anything.
 */
@After
public void tearDown() {
  if (ssc != null) {
    ssc.stop();
    ssc = null;
  }
  if (kafkaTestUtils != null) {
    kafkaTestUtils.teardown();
    kafkaTestUtils = null;
  }
}
/**
 * Per-test cleanup: stop the streaming context first, then tear down the
 * Kafka test harness. Each reference is cleared once released so a failed
 * setUp or repeated teardown cannot double-free anything.
 */
@After
public void tearDown() {
  if (ssc != null) {
    ssc.stop();
    ssc = null;
  }
  if (kafkaTestUtils != null) {
    kafkaTestUtils.teardown();
    kafkaTestUtils = null;
  }
}