tryExecute(env, "big topology test"); deleteTestTopic(topic);
tryExecute(env, "Read data from Kafka");
/** * Test that ensures that DeserializationSchema.isEndOfStream() is properly evaluated. * * @throws Exception */ public void runEndOfStreamTest() throws Exception { final int elementCount = 300; final String topic = writeSequence("testEndOfStream", elementCount, 1, 1); // read using custom schema final StreamExecutionEnvironment env1 = StreamExecutionEnvironment.getExecutionEnvironment(); env1.setParallelism(1); env1.getConfig().setRestartStrategy(RestartStrategies.noRestart()); env1.getConfig().disableSysoutLogging(); Properties props = new Properties(); props.putAll(standardProps); props.putAll(secureProps); DataStream<Tuple2<Integer, Integer>> fromKafka = env1.addSource(kafkaServer.getConsumer(topic, new FixedNumberDeserializationSchema(elementCount), props)); fromKafka.flatMap(new FlatMapFunction<Tuple2<Integer, Integer>, Void>() { @Override public void flatMap(Tuple2<Integer, Integer> value, Collector<Void> out) throws Exception { // noop ;) } }); tryExecute(env1, "Consume " + elementCount + " elements from Kafka"); deleteTestTopic(topic); }
/**
 * Runs the test program defined in {@link #testProgram(StreamExecutionEnvironment)}
 * followed by the checks in {@link #postSubmit}.
 *
 * @throws Exception if the test program or the post-submit checks fail
 */
@Test
public void runCheckpointedProgram() throws Exception {
	final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setParallelism(PARALLELISM);
	env.enableCheckpointing(500);
	env.getConfig().disableSysoutLogging();
	// retry forever with no delay so that induced failures during the
	// fault-tolerance run restart the job instead of failing the test
	env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 0L));

	testProgram(env);

	// Let exceptions propagate: the method declares "throws Exception", so JUnit
	// reports the full stack trace. The previous catch / printStackTrace() /
	// Assert.fail(e.getMessage()) pattern dropped the cause and stack trace
	// (and produced a null failure message for exceptions without a message).
	TestUtils.tryExecute(env, "Fault Tolerance Test");

	postSubmit();
}
consuming.addSink(new DiscardingSink<String>()); tryExecute(env, "run auto offset reset test");
tryExecute(env, "Count elements from the topics");
.addSink(new PartitionValidatingSink(dynamicTopicPartitions)).setParallelism(1); tryExecute(env, "custom partitioning test");
tryExecute(env, "Broker failure once test");
tryExecute(env, "Read deletes from Kafka");
tryExecute(env, "One-source-multi-partitions exactly once test");
tryExecute(env, "One-to-one exactly once test");
tryExecute(env, "multi-source-one-partitions exactly once test");
tryExecute(env, "Read KV from Kafka");
TestUtils.tryExecute(env, "Exactly once test");
.setParallelism(1); tryExecute(env, "Sliding Window Test");
.setParallelism(1); tryExecute(env, "Tumbling Window Test");
TestUtils.tryExecute(env, "Fault Tolerance Test");
.setParallelism(1); tryExecute(env, "Aggregating Sliding Window Test");
.setParallelism(1); tryExecute(env, "Aggregating Tumbling Window Test");
/**
 * Writes two string elements into a Flume sink and runs the job.
 *
 * <p>NOTE(review): target host/port are hard-coded; presumably a Flume agent is
 * expected at 172.25.0.3:44444 in the test setup — confirm against the harness.
 *
 * @throws Exception if the streaming job fails
 */
@Test
public void testSink() throws Exception {
	final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

	env.fromElements("string1", "string2")
			.addSink(new FlumeSink<>("172.25.0.3", 44444, new SimpleStringSchema()));

	tryExecute(env, "FlumeTest");
}