@SuppressWarnings("unchecked") public T deserializeValue(ClassLoader loader) throws IOException, ClassNotFoundException { Preconditions.checkNotNull(loader, "No classloader has been passed"); return serializedData == null ? null : (T) InstantiationUtil.deserializeObject(serializedData, loader); }
@Override
public String toString() {
	if (this.toString == null) {
		final byte[] ba = new byte[SIZE];
		longToByteArray(this.lowerPart, ba, 0);
		longToByteArray(this.upperPart, ba, SIZE_OF_LONG);
		this.toString = StringUtils.byteToHexString(ba);
	}
	return this.toString;
}
/**
 * Returns a valid address for Akka. It returns a String of format 'host:port'.
 * When an IPv6 address is specified, it normalizes the IPv6 address to avoid
 * complications with the exact URL match policy of Akka.
 *
 * @param host The hostname, IPv4 or IPv6 address
 * @param port The port
 * @return host:port where host will be normalized if it is an IPv6 address
 */
public static String unresolvedHostAndPortToNormalizedString(String host, int port) {
	Preconditions.checkArgument(port >= 0 && port < 65536,
		"Port is not within the valid range.");
	return unresolvedHostToNormalizedString(host) + ":" + port;
}
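// Hedged usage sketch: assuming this is Flink's
// org.apache.flink.util.NetUtils#unresolvedHostAndPortToNormalizedString,
// host names and IPv4 addresses pass through unchanged, while IPv6 literals are
// normalized and bracketed before the port is appended.
import org.apache.flink.util.NetUtils;

public class NetUtilsUsage {
	public static void main(String[] args) {
		// plain host name: returned as "example.com:6123"
		System.out.println(NetUtils.unresolvedHostAndPortToNormalizedString("example.com", 6123));
		// IPv6 literal: returned in a normalized, bracketed form such as "[2001:db8::1]:6123"
		System.out.println(NetUtils.unresolvedHostAndPortToNormalizedString("2001:0db8:0:0:0:0:0:1", 6123));
	}
}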
public AggregatingUdf(AggregationFunction<Object>[] aggFunctions, int[] fieldPositions) {
	Preconditions.checkNotNull(aggFunctions);
	Preconditions.checkNotNull(fieldPositions);
	Preconditions.checkArgument(aggFunctions.length == fieldPositions.length);
	this.aggFunctions = aggFunctions;
	this.fieldPositions = fieldPositions;
}
protected Resource(String name, double value, ResourceAggregateType type) {
	this.name = checkNotNull(name);
	this.value = value;
	this.resourceAggregateType = checkNotNull(type);
}
/**
 * @return the stored value; same as {@link #get()}, but wraps the failure cause in an
 *     unchecked {@link FlinkRuntimeException} instead of a checked exception.
 */
public T getUnchecked() throws FlinkRuntimeException {
	if (value != null) {
		return value;
	}
	checkNotNull(failureCause);
	throw new FlinkRuntimeException(failureCause);
}
/**
 * @return the stored value, or throws a {@link FlinkException} wrapping {@code failureCause}
 *     if no value is present.
 */
public T get() throws FlinkException {
	if (value != null) {
		return value;
	}
	checkNotNull(failureCause);
	throw new FlinkException(failureCause);
}
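// Hedged usage sketch for the two accessors above, assuming they belong to Flink's
// org.apache.flink.types.OptionalFailure and that its static factories of(...) and
// ofFailure(...) are available.
import org.apache.flink.types.OptionalFailure;
import org.apache.flink.util.FlinkException;
import org.apache.flink.util.FlinkRuntimeException;

public class OptionalFailureUsage {
	public static void main(String[] args) throws FlinkException {
		OptionalFailure<Long> ok = OptionalFailure.of(42L);
		OptionalFailure<Long> failed = OptionalFailure.ofFailure(new IllegalStateException("boom"));

		System.out.println(ok.get()); // 42; get() declares the checked FlinkException

		try {
			failed.getUnchecked(); // no value stored, so the failure cause is rethrown unchecked
		} catch (FlinkRuntimeException e) {
			System.out.println(e.getCause()); // the original IllegalStateException
		}
	}
}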
public <T> TypeSerializer<T> getTypeSerializerSideOut(OutputTag<?> outputTag, ClassLoader cl) {
	Preconditions.checkNotNull(outputTag, "Side output id must not be null.");
	try {
		return InstantiationUtil.readObjectFromConfig(this.config, TYPE_SERIALIZER_SIDEOUT_PREFIX + outputTag.getId(), cl);
	} catch (Exception e) {
		throw new StreamTaskException("Could not instantiate serializer.", e);
	}
}
private IncrementalCleanupStrategy(int cleanupSize, boolean runCleanupForEveryRecord) {
	Preconditions.checkArgument(cleanupSize >= 0,
		"Number of incrementally cleaned up state entries cannot be negative.");
	this.cleanupSize = cleanupSize;
	this.runCleanupForEveryRecord = runCleanupForEveryRecord;
}
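// Hedged usage sketch: assuming this strategy is the one configured through Flink's
// StateTtlConfig builder, incremental cleanup is typically enabled like this
// (check 5 entries per state access, and additionally run cleanup for every processed record).
import org.apache.flink.api.common.state.StateTtlConfig;
import org.apache.flink.api.common.time.Time;

public class TtlCleanupUsage {
	public static void main(String[] args) {
		StateTtlConfig ttlConfig = StateTtlConfig
			.newBuilder(Time.minutes(10))
			.cleanupIncrementally(5, true)
			.build();
		System.out.println(ttlConfig);
	}
}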
@Override
public void join(T1 first, T2 second, Collector<Tuple2<T1, T2>> out) throws Exception {
	outTuple.f0 = first;
	outTuple.f1 = second;
	out.collect(outTuple);
}
default void close() throws Exception {
	try {
		closeAsync().get();
	} catch (ExecutionException e) {
		throw new FlinkException("Could not close resource.", ExceptionUtils.stripExecutionException(e));
	}
}
/**
 * Unpacks an {@link ExecutionException} and returns its cause. Otherwise the given
 * Throwable is returned.
 *
 * @param throwable to unpack if it is an ExecutionException
 * @return Cause of ExecutionException or given Throwable
 */
public static Throwable stripExecutionException(Throwable throwable) {
	return stripException(throwable, ExecutionException.class);
}
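// Hedged usage sketch: unwrapping the real failure from a CompletableFuture awaited
// with get(), assuming the method above is Flink's org.apache.flink.util.ExceptionUtils.
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import org.apache.flink.util.ExceptionUtils;

public class StripExecutionExceptionUsage {
	public static void main(String[] args) throws InterruptedException {
		CompletableFuture<String> future = new CompletableFuture<>();
		future.completeExceptionally(new IllegalStateException("real failure"));
		try {
			future.get();
		} catch (ExecutionException e) {
			// prints the IllegalStateException, not the wrapping ExecutionException
			System.out.println(ExceptionUtils.stripExecutionException(e));
		}
	}
}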
@Override
public Iterator<T> iterator() {
	if (iteratorAvailable) {
		iteratorAvailable = false;
		return this;
	} else {
		throw new TraversableOnceException();
	}
}
public boolean isClosed() {
	synchronized (getSynchronizationLock()) {
		return closed;
	}
}
/**
 * Creates a new data input stream from the given Hadoop input stream.
 *
 * @param fsDataInputStream The Hadoop input stream
 */
public HadoopDataInputStream(org.apache.hadoop.fs.FSDataInputStream fsDataInputStream) {
	this.fsDataInputStream = checkNotNull(fsDataInputStream);
}
/**
 * Unpacks a {@link CompletionException} and returns its cause. Otherwise the given
 * Throwable is returned.
 *
 * @param throwable to unpack if it is a CompletionException
 * @return Cause of CompletionException or given Throwable
 */
public static Throwable stripCompletionException(Throwable throwable) {
	return stripException(throwable, CompletionException.class);
}
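// Hedged usage sketch, analogous to the stripExecutionException example above:
// join() wraps failures in a CompletionException, which stripCompletionException unwraps.
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import org.apache.flink.util.ExceptionUtils;

public class StripCompletionExceptionUsage {
	public static void main(String[] args) {
		CompletableFuture<String> future = new CompletableFuture<>();
		future.completeExceptionally(new IllegalStateException("real failure"));
		try {
			future.join();
		} catch (CompletionException e) {
			System.out.println(ExceptionUtils.stripCompletionException(e));
		}
	}
}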
/**
 * Configures how to partition records from Flink's partitions into Kafka's partitions.
 *
 * <p>This strategy allows for a custom partitioner by providing an implementation
 * of {@link FlinkKafkaPartitioner}.
 */
public Kafka sinkPartitionerCustom(Class<? extends FlinkKafkaPartitioner> partitionerClass) {
	sinkPartitionerType = CONNECTOR_SINK_PARTITIONER_VALUE_CUSTOM;
	sinkPartitionerClass = Preconditions.checkNotNull(partitionerClass);
	return this;
}
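// Hedged usage sketch: assuming the method above belongs to Flink's
// org.apache.flink.table.descriptors.Kafka connector descriptor and that
// FlinkKafkaPartitioner lives in the usual Kafka connector package. The partitioner
// below is made up and simply writes every record to the first available partition.
import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
import org.apache.flink.table.descriptors.Kafka;

public class KafkaDescriptorUsage {

	/** Made-up partitioner: always picks the first available Kafka partition. */
	public static class FirstPartitionOnly extends FlinkKafkaPartitioner<Object> {
		@Override
		public int partition(Object record, byte[] key, byte[] value, String targetTopic, int[] partitions) {
			return partitions[0];
		}
	}

	public static void main(String[] args) {
		Kafka kafka = new Kafka()
			.version("universal")
			.topic("output-topic")
			.sinkPartitionerCustom(FirstPartitionOnly.class);
		System.out.println(kafka);
	}
}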
public KryoRegistration(Class<?> registeredClass, Class<? extends Serializer<?>> serializerClass) {
	this.registeredClass = Preconditions.checkNotNull(registeredClass);
	this.serializerClass = Preconditions.checkNotNull(serializerClass);
	this.serializableSerializerInstance = null;
	this.serializerDefinitionType = SerializerDefinitionType.CLASS;
}
/**
 * Translate {@link Edge} values using the given {@link TranslateFunction}.
 *
 * @param translator implements conversion from {@code OLD} to {@code NEW}
 */
public TranslateEdgeValues(TranslateFunction<OLD, NEW> translator) {
	Preconditions.checkNotNull(translator);
	this.translator = translator;
}
/**
 * Creates an AbstractDeserializationSchema that returns the given TypeInformation
 * for the produced type. This constructor is only necessary when creating a generic
 * implementation, see {@link AbstractDeserializationSchema Generic Use}.
 *
 * @param typeInfo The TypeInformation for the produced type.
 */
protected AbstractDeserializationSchema(TypeInformation<T> typeInfo) {
	this.type = checkNotNull(typeInfo, "typeInfo");
}
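// Hedged usage sketch: a generic implementation cannot have its type parameter T
// extracted reflectively, so it hands the TypeInformation to the protected constructor
// above. DecoderSchema and ByteDecoder are made-up names, the decoding logic is a
// placeholder, and the package of AbstractDeserializationSchema is assumed to be the
// one used in newer Flink versions.
import java.io.IOException;
import java.io.Serializable;
import org.apache.flink.api.common.serialization.AbstractDeserializationSchema;
import org.apache.flink.api.common.typeinfo.TypeInformation;

public class DecoderSchema<T> extends AbstractDeserializationSchema<T> {

	/** Made-up serializable decoding function standing in for real parsing logic. */
	public interface ByteDecoder<T> extends Serializable {
		T decode(byte[] bytes) throws IOException;
	}

	private final ByteDecoder<T> decoder;

	public DecoderSchema(TypeInformation<T> typeInfo, ByteDecoder<T> decoder) {
		super(typeInfo); // the produced type must be given explicitly for a generic schema
		this.decoder = decoder;
	}

	@Override
	public T deserialize(byte[] message) throws IOException {
		return decoder.decode(message);
	}
}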