/**
 * Computes the lineage for the FlowFile with the given UUID, as visible to the given user.
 *
 * @param flowFileUuid the UUID of the FlowFile whose lineage is to be computed
 * @param user the user on whose behalf the lineage is being computed
 * @return the computed Lineage
 * @throws IOException if unable to read the underlying event records
 */
Lineage computeLineage(final String flowFileUuid, final NiFiUser user) throws IOException {
    // Delegate to the multi-UUID overload, covering the entire time range.
    final Set<String> uuids = Collections.singleton(flowFileUuid);
    return computeLineage(uuids, user, LineageComputationType.FLOWFILE_LINEAGE, null, 0L, Long.MAX_VALUE);
}
/**
 * Recursively deletes the given directory. If unable to delete a file or the
 * directory itself, will emit a WARN level log event and move on.
 *
 * @param dir the directory to delete; a {@code null} or non-existent directory is a no-op
 */
private void deleteDirectory(final File dir) {
    if (dir == null || !dir.exists()) {
        return;
    }

    // listFiles() returns null if dir is not a directory or an I/O error occurs;
    // in that case there is nothing we can safely do, so give up quietly.
    final File[] children = dir.listFiles();
    if (children == null) {
        return;
    }

    for (final File child : children) {
        if (child.isDirectory()) {
            deleteDirectory(child);
        } else if (!child.delete()) {
            logger.warn("Unable to remove index directory {}; this directory should be cleaned up manually", child.getAbsolutePath());
        }
    }

    if (!dir.delete()) {
        // Fix: log the absolute path here for consistency with the per-file warning above
        // (previously the bare File was logged, which may render as a relative path).
        logger.warn("Unable to remove index directory {}; this directory should be cleaned up manually", dir.getAbsolutePath());
    }
}
// NOTE(review): fragment — the enclosing method declaration and several closing braces
// are outside this view (code appears after an unconditional `return;`, so multiple
// distinct regions have been collapsed together). Presumably part of old-index purging:
// with fewer than two index directories there is nothing old enough to remove, so only
// the first event timestamp is recomputed; otherwise the oldest index is removed from
// the IndexManager and its directory deleted. TODO confirm against the full method.
final List<File> indexDirs = getAllIndexDirectories(); if (indexDirs.size() < 2) { this.firstEventTimestamp = determineFirstEventTimestamp(); return; final long latestTimestampOfFirstIndex = getIndexTimestamp(indexDirs.get(1)); final List<File> logFiles = getSortedLogFiles(); if (logFiles.isEmpty()) { this.firstEventTimestamp = System.currentTimeMillis(); getIndexManager().removeIndex(indexingDirectory); indexConfig.removeIndexDirectory(indexingDirectory); deleteDirectory(indexingDirectory);
// NOTE(review): fragment — the enclosing method and the log statement that the dangling
// `+ "threshold for blocking is {} ..."` string belongs to are outside this view.
// Appears to schedule the rollover task, then loop while the journal count / repository
// size exceed thresholds (journalCount * 5, capacity * ROLLOVER_HIGH_WATER), purging old
// events and re-sampling before recreating the writers. TODO confirm against full method.
future = rolloverExecutor.scheduleWithFixedDelay(rolloverRunnable, 0, getRolloverRetryMillis(), TimeUnit.MILLISECONDS); futureReference.set(future); int journalFileCount = getJournalCount(); long repoSize = getSize(getLogFiles(), 0L); final int journalCountThreshold = configuration.getJournalCount() * 5; final long sizeThreshold = (long) (configuration.getMaxStorageCapacity() * ROLLOVER_HIGH_WATER); purgeOldEvents(); journalFileCount = getJournalCount(); repoSize = getSize(getLogFiles(), 0L); continue; } else { + "threshold for blocking is {} ({} bytes)", journalFileCount, repoSize, journalCountThreshold, sizeThreshold); journalFileCount = getJournalCount(); repoSize = getSize(getLogFiles(), 0L); writers = createWriters(configuration, idGenerator.get()); dirtyWriterCount.set(0); streamStartTime.set(System.currentTimeMillis());
// NOTE(review): fragment — collapsed pieces of the journal-merge routine; the enclosing
// method, the message the dangling `+ "so assuming that the merge did not finish..."`
// string belongs to, and the try/finally boundaries are outside this view. Appears to:
// detect an incomplete prior merge and delete the partial index via DeleteIndexAction,
// write the merged file header, index records on a fixed-size thread pool, and return
// the borrowed EventIndexWriter when done. TODO confirm against the full method.
final List<File> availableFiles = filterUnavailableFiles(journalFiles); final int numAvailableFiles = availableFiles.size(); + "so assuming that the merge did not finish. Repeating procedure in order to ensure consistency."); final DeleteIndexAction deleteAction = new DeleteIndexAction(this, indexConfig, getIndexManager()); try { deleteAction.execute(suggestedMergeFile); writer.writeHeader(minEventId); final IndexingAction indexingAction = createIndexingAction(); final List<Future<?>> futures = new ArrayList<>(); final EventIndexWriter indexWriter = getIndexManager().borrowIndexWriter(indexingDirectory); try { final ExecutorService exec = Executors.newFixedThreadPool(configuration.getIndexThreadPoolSize(), new ThreadFactory() { latestRecords.add(truncateAttributes(record)); records++; getIndexManager().returnIndexWriter(indexWriter);
// NOTE(review): fragment — the enclosing method declaration and loop structure are
// outside this view. Appears to collect up to maxRecords events starting at
// firstRecordId: if no log paths cover that id, the empty list is returned; otherwise
// each file is read and only events with id >= firstRecordId that the user is
// authorized to see are accumulated. TODO confirm against the full method.
final List<ProvenanceEventRecord> records = new ArrayList<>(maxRecords); final List<Path> paths = getPathsForId(firstRecordId); if (paths == null || paths.isEmpty()) { return records; try (RecordReader reader = RecordReaders.newRecordReader(path.toFile(), getAllLogFiles(), maxAttributeChars)) { if (record.getEventId() >= firstRecordId && isAuthorized(record, user)) { records.add(record);
// NOTE(review): fragment — tail of a constructor or initialize method; the declaration
// and the matching write-lock acquisition for the `writeLock.unlock()` in the finally
// block are outside this view. Recovers prior state, creates the event writers, and
// determines the first event timestamp under the write lock. TODO confirm.
this.resourceFactory = resourceFactory; recover(); writers = createWriters(configuration, idGenerator.get()); firstEventTimestamp = determineFirstEventTimestamp(); } finally { writeLock.unlock();
/**
 * Returns all Provenance Event Log Files, ordered ascending by the first
 * Event ID contained in each file.
 *
 * @return the sorted list of event log files
 */
private List<File> getSortedLogFiles() {
    final List<Path> sortedPaths = new ArrayList<>(getAllLogFiles());

    // Order the paths by the first Event ID stored in each underlying file.
    Collections.sort(sortedPaths, new Comparator<Path>() {
        @Override
        public int compare(final Path a, final Path b) {
            final long firstIdA = getFirstEventId(a.toFile());
            final long firstIdB = getFirstEventId(b.toFile());
            return Long.compare(firstIdA, firstIdB);
        }
    });

    final List<File> sortedFiles = new ArrayList<>(sortedPaths.size());
    for (final Path sortedPath : sortedPaths) {
        sortedFiles.add(sortedPath.toFile());
    }
    return sortedFiles;
}
// NOTE(review): fragment — the enclosing recovery method, the update of `maxId` inside
// the truncated if-body, and the catch/close of the try-with-resources are outside this
// view. Appears to read the max event id from the newest log file and merge recovered
// journal files into the set of files to recover. TODO confirm against full method.
try (final RecordReader reader = RecordReaders.newRecordReader(maxIdFile, getAllLogFiles(), maxAttributeChars)) { final long eventId = reader.getMaxEventId(); if (eventId > maxId) { final Set<File> recoveredJournals = recoverJournalFiles(); filesToRecover.addAll(recoveredJournals);
// NOTE(review): fragment — collapsed pieces of a query path; the enclosing method and
// the trimming logic following `if (latestList.size() > query.getMaxResults())` are
// outside this view. Filters the cached latest records by the user's authorization,
// and short-circuits with an empty result when the repository holds no events
// (maxEventId == null). TODO confirm against the full method.
final List<ProvenanceEventRecord> latestList = filterUnauthorizedEvents(latestRecords.asList(), user); final List<ProvenanceEventRecord> trimmed; if (latestList.size() > query.getMaxResults()) { Long maxEventId = getMaxEventId(); if (maxEventId == null) { result.getResult().update(Collections.<ProvenanceEventRecord>emptyList(), 0L);
/**
 * Filters out any events the wrapped user is not authorized to see, delegating
 * the authorization decision to the underlying repository.
 *
 * @param events the events to filter
 * @return only those events the user is authorized to access
 */
@Override
public List<ProvenanceEventRecord> filterUnauthorizedEvents(List<ProvenanceEventRecord> events) {
    final List<ProvenanceEventRecord> visibleEvents = repository.filterUnauthorizedEvents(events, user);
    return visibleEvents;
}
/**
 * Authorizes the given event on behalf of the wrapped user by delegating to the
 * underlying repository.
 *
 * @param event the event to authorize
 * @throws AccessDeniedException if the user is not authorized to access the event
 */
@Override
public void authorize(ProvenanceEventRecord event) throws AccessDeniedException {
    repository.authorize(event, user);
}
// NOTE(review): fragment — the trailing `};` closes an anonymous class whose
// declaration is outside this view; presumably a DocsReader-based converter for
// Lucene search hits. TODO confirm against the enclosing declaration.
@Override
public Set<ProvenanceEventRecord> convert(TopDocs topDocs, IndexReader indexReader) throws IOException {
    // Always authorized. We do this because we need to pull back the event, regardless of whether or not
    // the user is truly authorized, because instead of ignoring unauthorized events, we want to replace them.
    final EventAuthorizer authorizer = EventAuthorizer.GRANT_ALL;

    // No cap on results here (Integer.MAX_VALUE); the counter starts fresh for each conversion.
    final DocsReader docsReader = new DocsReader();
    return docsReader.read(topDocs, authorizer, indexReader, getAllLogFiles(), new AtomicInteger(0), Integer.MAX_VALUE, maxAttributeChars);
}
};
// NOTE(review): fragment — returns an anonymous Iterator whose body (hasNext/next and
// the use of `count` and `allLogFiles`) is outside this view. TODO confirm.
final Collection<Path> allLogFiles = getAllLogFiles(); return new Iterator<ProvenanceEventRecord>() { int count = 0;
// NOTE(review): fragment — single statement from a larger search routine; reads the
// matching events for the Lucene hits, bounded by the query's max results and the
// attribute-length cap. The surrounding method is outside this view.
matchingRecords = docsReader.read(topDocs, authorizer, searcher.getIndexSearcher().getIndexReader(), repository.getAllLogFiles(), retrievedCount, provenanceQuery.getMaxResults(), maxAttributeChars);
// NOTE(review): fragment — the catch block is truncated and the enclosing expiration
// routine is outside this view. Reads the max event id from a log file that is about
// to expire, presumably so expiration bookkeeping can record it. TODO confirm.
try (final RecordReader reader = RecordReaders.newRecordReader(expiredFile, repository.getAllLogFiles(), Integer.MAX_VALUE)) { maxEventId = reader.getMaxEventId(); } catch (final IOException ioe) {