@Override protected void init(@Nonnull Context context) throws Exception { processorIndex = sharedFileSystem ? context.globalProcessorIndex() : context.localProcessorIndex(); parallelism = sharedFileSystem ? context.totalParallelism() : context.localParallelism(); try (DirectoryStream<Path> directoryStream = Files.newDirectoryStream(watchedDirectory)) { for (Path file : directoryStream) { if (Files.isRegularFile(file)) { // Negative offset means "initial offset", needed to skip the first line fileOffsets.put(file, new FileOffset(-Files.size(file), "")); } } } watcher = FileSystems.getDefault().newWatchService(); watchedDirectory.register(watcher, WATCH_EVENT_KINDS, WATCH_EVENT_MODIFIERS); getLogger().info("Started to watch directory: " + watchedDirectory); }
/**
 * Initializes the processor: captures its global index, the total parallelism
 * and the snapshotting flag from the context, creates the Kafka consumer and
 * runs the initial partition assignment.
 *
 * @param context processor context
 */
@Override
protected void init(@Nonnull Context context) {
    snapshottingEnabled = context.snapshottingEnabled();
    totalParallelism = context.totalParallelism();
    processorIndex = context.globalProcessorIndex();
    consumer = new KafkaConsumer<>(properties);
    // NOTE(review): false presumably marks this as the initial assignment —
    // confirm against assignPartitions
    assignPartitions(false);
}
/**
 * Initializes the processor by reading its coordinates from the context
 * (global index, total parallelism, snapshotting flag), then constructs the
 * Kafka consumer and assigns partitions for the first time.
 *
 * @param context processor context
 */
@Override
protected void init(@Nonnull Context context) {
    processorIndex = context.globalProcessorIndex();
    snapshottingEnabled = context.snapshottingEnabled();
    totalParallelism = context.totalParallelism();
    consumer = new KafkaConsumer<>(properties);
    // NOTE(review): the false argument presumably distinguishes the initial
    // assignment from a later reassignment — verify in assignPartitions
    assignPartitions(false);
}
/**
 * Initializes the processor: records this processor's global index and the
 * total parallelism, and obtains a connection from the configured supplier.
 *
 * @param context processor context
 */
@Override
protected void init(@Nonnull Context context) {
    index = context.globalProcessorIndex();
    parallelism = context.totalParallelism();
    connection = connectionSupplier.get();
}
/**
 * Initializes the processor: picks this processor's slice of the work
 * (global index/parallelism when the file system is shared across members,
 * member-local ones otherwise), opens a directory stream over files matching
 * {@code glob}, and builds the traverser that lazily processes matching files.
 *
 * @param context processor context
 * @throws Exception if the directory stream cannot be opened
 */
@Override
protected void init(@Nonnull Context context) throws Exception {
    if (sharedFileSystem) {
        processorIndex = context.globalProcessorIndex();
        parallelism = context.totalParallelism();
    } else {
        processorIndex = context.localProcessorIndex();
        parallelism = context.localParallelism();
    }
    // directoryStream is a field: it must stay open while outputTraverser is
    // consumed and is presumably closed in close() — not visible here.
    directoryStream = Files.newDirectoryStream(directory, glob);
    outputTraverser = Traversers.traverseIterator(directoryStream.iterator())
                                .filter(this::shouldProcessEvent)
                                .flatMap(this::processFile);
}