/**
 * @return a new {@link StepActionWorker}
 */
public StepActionWorker<SSWFInput, StepEnum> build() {
    if (stepEnumClass == null) throw new IllegalArgumentException("stepEnumClass was null");
    if (domain == null) throw new IllegalArgumentException("domain was null");
    if (taskList == null) throw new IllegalArgumentException("taskList was null");
    if (swf == null) throw new IllegalArgumentException("swf was null");
    if (inputParser == null) throw new IllegalArgumentException("inputParser was null");
    if (workflowDefinition == null) throw new IllegalArgumentException("workflowDefinition was null");
    if (logger == null) throw new IllegalArgumentException("logger was null");

    return new StepActionWorker<>(domain, taskList, swf, inputParser, workflowDefinition, logger,
            ClassTag$.MODULE$.apply(stepEnumClass));
}
private void tryReadNext() {
    try {
        next = sparkDeserializationStream.readObject(ClassTag$.MODULE$.Any());
    } catch (Exception e) {
        // ~ need to go with this ugly instanceof construct since the
        // scala method does not declare the exception as part of the
        // readObject method's signature. but it actually does throw it
        // to signal the end-of-stream.
        if (EOFException.class.isAssignableFrom(e.getClass())) {
            streamFinished = true;
        } else {
            throw e;
        }
    }
}
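A minimal sketch of how such a helper could back an Iterator-style wrapper. The fields next and streamFinished are taken from the snippet above; the hasNext() framing and the assumption that next is cleared by the corresponding next() call are illustrative.

// Hypothetical Iterator-style usage of tryReadNext(); only the fields referenced
// above come from the original code, the surrounding contract is an assumption.
public boolean hasNext() {
    if (next == null && !streamFinished) {
        tryReadNext();
    }
    return !streamFinished;
}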
public static <T> ClassTag<T> classTag(Class<T> source) {
    return scala.reflect.ClassTag$.MODULE$.apply(source);
}
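A minimal sketch of how a helper like this might be called from Java, assuming Spark is on the classpath. The wrap method and its argument are illustrative; only the JavaRDD constructor and the helper above are existing API.

import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.rdd.RDD;
import scala.reflect.ClassTag;

// Hypothetical caller: wrap a Scala RDD<String> as a JavaRDD<String>,
// supplying the ClassTag that the JavaRDD constructor requires.
public static JavaRDD<String> wrap(RDD<String> scalaRdd) {
    ClassTag<String> tag = classTag(String.class);
    return new JavaRDD<>(scalaRdd, tag);
}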
public DeepJavaRDD(DeepRDD<T, S> rdd) {
    super(rdd, ClassTag$.MODULE$.<T>apply(rdd.config.value().getEntityClass()));
}
@Override
public CompletableFuture<ExecutionState> requestPartitionProducerState(
        JobID jobId,
        IntermediateDataSetID intermediateDataSetId,
        ResultPartitionID resultPartitionId) {

    JobManagerMessages.RequestPartitionProducerState msg = new JobManagerMessages.RequestPartitionProducerState(
            jobId,
            intermediateDataSetId,
            resultPartitionId
    );

    scala.concurrent.Future<ExecutionState> futureResponse = jobManager
            .ask(msg, timeout)
            .mapTo(ClassTag$.MODULE$.<ExecutionState>apply(ExecutionState.class));

    return FutureUtils.toJava(futureResponse);
}
@Override
public Future<ExecutionState> requestPartitionProducerState(
        JobID jobId,
        IntermediateDataSetID intermediateDataSetId,
        ResultPartitionID resultPartitionId) {

    JobManagerMessages.RequestPartitionProducerState msg = new JobManagerMessages.RequestPartitionProducerState(
            jobId,
            intermediateDataSetId,
            resultPartitionId
    );

    scala.concurrent.Future<ExecutionState> futureResponse = jobManager
            .ask(msg, timeout)
            .mapTo(ClassTag$.MODULE$.<ExecutionState>apply(ExecutionState.class));

    return new FlinkFuture<>(futureResponse);
}
@SuppressWarnings("unchecked") public static <T> ClassTag<T> anyClassTag() { return (ClassTag<T>) ClassTag$.MODULE$.AnyRef(); }
@Override
public CompletableFuture<Acknowledge> updatePartitions(ExecutionAttemptID executionAttemptID,
        Iterable<PartitionInfo> partitionInfos, Time timeout) {

    Preconditions.checkNotNull(executionAttemptID);
    Preconditions.checkNotNull(partitionInfos);

    TaskMessages.UpdatePartitionInfo updatePartitionInfoMessage = new TaskMessages.UpdateTaskMultiplePartitionInfos(
            executionAttemptID,
            partitionInfos);

    scala.concurrent.Future<Acknowledge> updatePartitionsResult = actorGateway.ask(
            updatePartitionInfoMessage,
            new FiniteDuration(timeout.getSize(), timeout.getUnit()))
            .mapTo(ClassTag$.MODULE$.<Acknowledge>apply(Acknowledge.class));

    return FutureUtils.toJava(updatePartitionsResult);
}
@Override
public <R> JavaRDD<R> map(final Function<Tuple2<K, V>, R> f) {
    return rdd.map(f, ClassTag$.MODULE$.apply(Object.class)).toJavaRDD();
}
public Graph<RyaTypeWritable, RyaTypeWritable> createGraph(SparkContext sc, Configuration conf)
        throws IOException, AccumuloSecurityException {
    StorageLevel storageLvl1 = StorageLevel.MEMORY_ONLY();
    StorageLevel storageLvl2 = StorageLevel.MEMORY_ONLY();
    ClassTag<RyaTypeWritable> RTWTag = ClassTag$.MODULE$.apply(RyaTypeWritable.class);
    RyaTypeWritable rtw = null;
    RDD<Tuple2<Object, RyaTypeWritable>> vertexRDD = getVertexRDD(sc, conf);

    RDD<Tuple2<Object, Edge>> edgeRDD = getEdgeRDD(sc, conf);
    JavaRDD<Tuple2<Object, Edge>> jrddTuple = edgeRDD.toJavaRDD();
    JavaRDD<Edge<RyaTypeWritable>> jrdd = jrddTuple.map(tuple -> tuple._2);
    RDD<Edge<RyaTypeWritable>> goodERDD = JavaRDD.toRDD(jrdd);

    return Graph.apply(vertexRDD, goodERDD, rtw, storageLvl1, storageLvl2, RTWTag, RTWTag);
}
/**
 * Map transform.
 *
 * @param func function to apply.
 * @param <O>  output type.
 * @return the JavaRDD with the extended DAG.
 */
@Override
public <O> JavaRDD<O> map(final Function<T, O> func) {
    return rdd.map(func, ClassTag$.MODULE$.apply(Object.class)).toJavaRDD();
}
@Override
public JavaPairRDD<K, V> reduceByKey(final Function2<V, V, V> func) {
    // Explicit conversion
    final PairRDDFunctions<K, V> pairRdd = RDD.rddToPairRDDFunctions(
            rdd, ClassTag$.MODULE$.apply(Object.class), ClassTag$.MODULE$.apply(Object.class), null);
    final RDD<Tuple2<K, V>> reducedRdd = pairRdd.reduceByKey(func);
    return JavaPairRDD.fromRDD(reducedRdd);
}
public static <T> ClassTag<T> getManifest(Class<T> clazz) {
    return ClassTag$.MODULE$.apply(clazz);
}
@SuppressWarnings("unchecked") @Override public void writeObject(Object o) { if (type == null) { type = ClassTag$.MODULE$.apply(o.getClass()); } sparkSerializationStream.writeObject(o, type); }
@SuppressWarnings("unchecked") public static <T> ClassTag<T> anyClassTag() { return (ClassTag<T>) ClassTag$.MODULE$.AnyRef(); }
@SuppressWarnings("unchecked") public static <T> ClassTag<T> anyClassTag() { return (ClassTag<T>) ClassTag$.MODULE$.AnyRef(); }
private PathBindable<?> pathBindableFor(Class<?> clazz) {
    PathBindable<?> builtIn = Scala.orNull(PathBindable$.MODULE$.pathBindableRegister().get(clazz));
    if (builtIn != null) {
        return builtIn;
    } else if (play.mvc.PathBindable.class.isAssignableFrom(clazz)) {
        return PathBindable$.MODULE$.javaPathBindable((ClassTag) ClassTag$.MODULE$.apply(clazz));
    } else if (clazz.equals(Object.class)) {
        // Special case for object, treat as a string
        return PathBindable.bindableString$.MODULE$;
    } else {
        throw new IllegalArgumentException("Don't know how to bind argument of type " + clazz);
    }
}