/**
 * Use {@link SinkProcessors#writeFileP(String, DistributedFunction, Charset, boolean)}
 * <p>
 * Returns a meta-supplier of processors that write items to files under
 * {@code directoryName}, one file per processor (keyed by global processor
 * index). Each item is converted to a line via {@code toStringFn}.
 *
 * @param directoryName directory the per-processor output files are created in
 * @param toStringFn    converts an item to the line written to the file
 * @param charset       name of the charset used to encode the files
 * @param append        whether to append to an existing file instead of overwriting
 */
public static <T> ProcessorMetaSupplier metaSupplier(
        @Nonnull String directoryName,
        @Nonnull DistributedFunction<? super T, ? extends String> toStringFn,
        @Nonnull String charset,
        boolean append) {
    // Local parallelism of 1 is preferred: each processor owns one output file
    return ProcessorMetaSupplier.preferLocalParallelismOne(writeBufferedP(
            // one writer per processor, named by its global index
            ctx -> createBufferedWriter(Paths.get(directoryName), ctx.globalProcessorIndex(), charset, append),
            (fileWriter, item) -> {
                // unchecked cast: writeBufferedP erases the item type
                fileWriter.write(toStringFn.apply((T) item));
                fileWriter.newLine();
            },
            BufferedWriter::flush,
            BufferedWriter::close
    ));
}
@Override
protected void init(@Nonnull Context context) throws Exception {
    // One output file per processor instance: <base path>/<instance name>-<global index>
    String fileName = context.jetInstance().getName() + '-' + context.globalProcessorIndex();
    Path outputFile = Paths.get(this.path, fileName);
    writer = Files.newBufferedWriter(outputFile, StandardCharsets.UTF_8);
}
/**
 * Bootstraps this processor: stores the outbox and logger, then delegates to
 * the subclass's {@code init(Context)} hook. Declared {@code final} so
 * subclasses can only customize initialization through that hook.
 */
@Override
public final void init(@Nonnull Outbox outbox, @Nonnull Context context) throws Exception {
    // Order matters: outbox and logger must be assigned before the subclass
    // hook runs, since init(context) may already use them.
    this.outbox = outbox;
    this.logger = context.logger();
    init(context);
}
for (int i = 0; i < emittedCounts.length() - (context.snapshottingEnabled() ? 0 : 1); i++) { int finalI = i; probeBuilder
/**
 * Initializes the peeking wrapper: wraps the given outbox in a
 * {@link LoggingOutbox} and, when running inside a real job (context is a
 * {@code ProcCtx}), rebuilds the context so the logger carries the wrapped
 * processor's class name instead of the wrapper's.
 */
@Override
public void init(@Nonnull Outbox outbox, @Nonnull Context context) throws Exception {
    logger = context.logger();
    // Intercept emitted items/snapshot entries for logging
    outbox = new LoggingOutbox(outbox, peekOutput, peekSnapshot);

    // Fix issue #595: pass a logger with real class name to processor
    // We do this only if context is ProcCtx (that is, not for tests where TestProcessorContext can be used
    // and also other objects could be mocked or null, such as jetInstance())
    if (context instanceof ProcCtx) {
        ProcCtx c = (ProcCtx) context;
        NodeEngine nodeEngine = ((HazelcastInstanceImpl) c.jetInstance().getHazelcastInstance()).node.nodeEngine;
        // Logger named after the wrapped processor class, not this wrapper
        ILogger newLogger = nodeEngine.getLogger(
                createLoggerName(
                        getWrapped().getClass().getName(),
                        c.jobConfig().getName(),
                        c.vertexName(),
                        c.globalProcessorIndex())
        );
        // Rebuild the context identically except for the swapped-in logger.
        // Argument order must mirror the ProcCtx constructor exactly.
        context = new ProcCtx(c.jetInstance(), c.jobId(), c.executionId(), c.jobConfig(), newLogger, c.vertexName(),
                c.localProcessorIndex(), c.globalProcessorIndex(), c.processingGuarantee(), c.localParallelism(),
                c.memberIndex(), c.memberCount());
    }

    super.init(outbox, context);
}
/**
 * Returns a supplier of processors for {@link AvroSinks#files}.
 *
 * @param directoryName      directory to create the per-processor Avro files in
 * @param schemaSupplier     supplies the Avro {@link Schema} used by each writer
 * @param datumWriterSupplier supplies the {@link DatumWriter} used to serialize items
 * @param <D> the datum (item) type written to the files
 */
@Nonnull
public static <D> ProcessorMetaSupplier writeFilesP(
        @Nonnull String directoryName,
        @Nonnull DistributedSupplier<Schema> schemaSupplier,
        @Nonnull DistributedSupplier<DatumWriter<D>> datumWriterSupplier
) {
    // Local parallelism fixed at 1: each processor owns one output file,
    // named by its global processor index.
    return ProcessorMetaSupplier.of(
            WriteBufferedP.<DataFileWriter<D>, D>supplier(
                    context -> createWriter(Paths.get(directoryName), context.globalProcessorIndex(),
                            schemaSupplier, datumWriterSupplier),
                    DataFileWriter::append,
                    DataFileWriter::flush,
                    DataFileWriter::close
            ), 1);
}
/**
 * Records which slice of the files this processor handles, snapshots the
 * initial sizes of pre-existing files, and starts watching the directory.
 */
@Override
protected void init(@Nonnull Context context) throws Exception {
    // On a shared file system every cluster-wide processor must see a distinct
    // slice; on a local file system only members of this JVM share the work.
    processorIndex = sharedFileSystem ? context.globalProcessorIndex() : context.localProcessorIndex();
    parallelism = sharedFileSystem ? context.totalParallelism() : context.localParallelism();

    // Snapshot current file sizes so only data appended after startup is emitted
    try (DirectoryStream<Path> directoryStream = Files.newDirectoryStream(watchedDirectory)) {
        for (Path file : directoryStream) {
            if (Files.isRegularFile(file)) {
                // Negative offset means "initial offset", needed to skip the first line
                fileOffsets.put(file, new FileOffset(-Files.size(file), ""));
            }
        }
    }

    watcher = FileSystems.getDefault().newWatchService();
    watchedDirectory.register(watcher, WATCH_EVENT_KINDS, WATCH_EVENT_MODIFIERS);
    getLogger().info("Started to watch directory: " + watchedDirectory);
}
/**
 * Resolves the Hazelcast instance (local member or a newly created remote
 * client) and prepares a traverser over the source IList.
 */
@Override
protected void init(@Nonnull Context context) {
    HazelcastInstance instance;
    if (isRemote()) {
        // Remote cluster: create (and remember, for cleanup) a client
        instance = client = newHazelcastClient(asClientConfig(clientXml));
    } else {
        instance = context.jetInstance().getHazelcastInstance();
    }
    IList<Object> list = instance.getList(name);
    final int size = list.size();
    // Small lists are traversed directly; larger ones are fetched in
    // FETCH_SIZE chunks via subList to bound per-call transfer size.
    // NOTE(review): when size is an exact multiple of FETCH_SIZE the last
    // chunk is empty — harmless, but produces one extra subList call.
    traverser = size <= FETCH_SIZE ?
            traverseIterable(list) :
            traverseStream(rangeClosed(0, size / FETCH_SIZE).mapToObj(chunkIndex -> chunkIndex * FETCH_SIZE))
                    .flatMap(start -> traverseIterable(list.subList(start, min(start + FETCH_SIZE, size))));
}
/**
 * Builds a timestamped stream source that polls flight data from the given URL.
 *
 * @param url                endpoint to poll for aircraft data
 * @param pollIntervalMillis interval between polls, in milliseconds
 * @param allowedLateness    maximum event lateness tolerated by the source
 */
public static StreamSource<Aircraft> flightDataSource(String url, long pollIntervalMillis, long allowedLateness) {
    return SourceBuilder.timestampedStream("Flight Data Source",
            ctx -> new FlightDataSource(ctx.logger(), url, pollIntervalMillis))
            .fillBufferFn(FlightDataSource::fillBuffer)
            .allowedLateness(allowedLateness)
            .build();
}
}
@Override protected void init(@Nonnull Context context) { if (!contextFactory.isSharedLocally()) { assert contextObject == null : "contextObject is not null: " + contextObject; contextObject = contextFactory.createFn().apply(context.jetInstance()); } maxAsyncOps = contextFactory.getMaxPendingCallsPerProcessor(); queue = new ArrayDeque<>(maxAsyncOps); }
/**
 * Lazily creates this processor's context object. When the factory shares a
 * single context object per member, creation happens elsewhere and this is a
 * no-op.
 */
@Override
protected void init(@Nonnull Context context) {
    if (contextFactory.isSharedLocally()) {
        return;
    }
    assert contextObject == null : "contextObject is not null: " + contextObject;
    contextObject = contextFactory.createFn().apply(context.jetInstance());
}
/**
 * Captures topology information from the context, creates the Kafka consumer
 * and performs the initial partition assignment.
 */
@Override
protected void init(@Nonnull Context context) {
    processorIndex = context.globalProcessorIndex();
    totalParallelism = context.totalParallelism();
    snapshottingEnabled = context.snapshottingEnabled();
    consumer = new KafkaConsumer<>(properties);
    // false: initial assignment, not a rebalance triggered at runtime
    assignPartitions(false);
}
/**
 * Creates the per-processor context object (unless shared across the member)
 * and sizes the lock-free result queue for completed async operations.
 */
@Override
protected void init(@Nonnull Context context) {
    if (!contextFactory.isSharedLocally()) {
        // Non-shared: each processor owns its context object
        assert contextObject == null : "contextObject is not null: " + contextObject;
        contextObject = contextFactory.createFn().apply(context.jetInstance());
    }
    maxAsyncOps = contextFactory.getMaxPendingCallsPerProcessor();
    // Many producer threads (async callbacks) hand results to the single
    // processor thread, hence the many-to-one concurrent queue.
    resultQueue = new ManyToOneConcurrentArrayQueue<>(maxAsyncOps);
}
/**
 * Returns a logger named after this object. Uses the Hazelcast logging
 * service when a Jet instance is available, otherwise falls back to the
 * static {@link Logger} factory (e.g. in tests).
 * <p>
 * Fix: the logger name was built with {@code getClass() + "."}, which calls
 * {@code Class.toString()} and yields names like
 * {@code "class com.example.Foo.…"} with a spurious {@code "class "} prefix.
 * Use {@code getClass().getName()} for a clean, conventional logger name.
 */
@SuppressFBWarnings(value = "RCN_REDUNDANT_NULLCHECK_OF_NONNULL_VALUE",
        justification = "jetInstance() can be null in TestProcessorContext")
private ILogger getLogger(@Nonnull Context context) {
    return context.jetInstance() != null ?
            context.jetInstance().getHazelcastInstance().getLoggingService()
                   .getLogger(getClass().getName() + "." + this) :
            Logger.getLogger(getClass());
}
/**
 * Opens the connection and records this processor's slot in the global
 * topology (index and total parallelism).
 */
@Override
protected void init(@Nonnull Context context) {
    index = context.globalProcessorIndex();
    parallelism = context.totalParallelism();
    connection = connectionSupplier.get();
}
/**
 * Stores the logger and eagerly opens the database connection and prepares
 * the statement so failures surface at job start rather than on first item.
 */
@Override
public void init(@Nonnull Outbox outbox, @Nonnull Context context) {
    // NOTE(review): the outbox parameter is not stored and no super.init call
    // is visible here — presumably this is a sink that never emits; confirm
    // against the enclosing class.
    logger = context.logger();
    connectAndPrepareStatement();
}
/**
 * Returns a diagnostic name of the form
 * {@code ProcessorTasklet{[jobName/]vertexName#globalIndex}}; the job-name
 * prefix is omitted when the job has no name.
 */
@Override
public String toString() {
    String jobName = context.jobConfig().getName();
    StringBuilder sb = new StringBuilder("ProcessorTasklet{");
    if (jobName != null) {
        sb.append(jobName).append('/');
    }
    return sb.append(context.vertexName())
             .append('#')
             .append(context.globalProcessorIndex())
             .append('}')
             .toString();
}
/**
 * Closes the wrapped processor exactly once. Exceptions from
 * {@code Processor.close()} are logged and deliberately swallowed: close runs
 * during teardown, when failing would mask the original job outcome.
 */
private void closeProcessor() {
    assert !processorClosed : "processor already closed";
    try {
        processor.close();
    } catch (Exception e) {
        logger.severe(jobNameAndExecutionId(context.jobConfig().getName(), context.executionId())
                + " encountered an exception in Processor.close(), ignoring it", e);
    }
}
/**
 * Determines this processor's slice of the input files and builds a lazy
 * traverser over the matching files in the directory.
 */
@Override
protected void init(@Nonnull Context context) throws Exception {
    // Shared file system: slice across the whole cluster; local file system:
    // slice only among processors on this member.
    processorIndex = sharedFileSystem ? context.globalProcessorIndex() : context.localProcessorIndex();
    parallelism = sharedFileSystem ? context.totalParallelism() : context.localParallelism();

    // Held in a field so it can be closed when the processor completes;
    // NOTE(review): presumably closed in close()/complete() — confirm.
    directoryStream = Files.newDirectoryStream(directory, glob);
    outputTraverser = Traversers.traverseIterator(directoryStream.iterator())
                                // each processor handles only its share of the files
                                .filter(this::shouldProcessEvent)
                                .flatMap(this::processFile);
}
/**
 * Captures topology information from the context, creates the Kafka consumer
 * and performs the initial partition assignment.
 */
@Override
protected void init(@Nonnull Context context) {
    processorIndex = context.globalProcessorIndex();
    totalParallelism = context.totalParallelism();
    snapshottingEnabled = context.snapshottingEnabled();
    consumer = new KafkaConsumer<>(properties);
    // false: initial assignment, not a rebalance triggered at runtime
    assignPartitions(false);
}