/**
 * Builds a new {@link RepositoryContext} for the given component, wiring in the
 * repositories held by this factory.
 *
 * @param connectable     the component the context is created for
 * @param connectionIndex shared round-robin index for the component's incoming connections
 * @return a freshly constructed RepositoryContext backed by this object's repositories
 */
public RepositoryContext newProcessContext(final Connectable connectable, final AtomicLong connectionIndex) {
    final RepositoryContext repositoryContext = new RepositoryContext(connectable, connectionIndex,
        contentRepo, flowFileRepo, flowFileEventRepo, counterRepo, provenanceRepo);
    return repositoryContext;
}
}
/**
 * Adjusts the named counter by the given delta.
 *
 * <p>Immediate adjustments are applied to the context right away (and also recorded
 * in {@code immediateCounters}); non-immediate ones are queued in
 * {@code countersOnCommit} and only take effect when the session commits.
 * Both maps are created lazily on first use.
 *
 * @param name      counter name
 * @param delta     amount to add (may be negative)
 * @param immediate whether the adjustment bypasses the commit cycle
 */
@Override
public void adjustCounter(final String name, final long delta, final boolean immediate) {
    verifyTaskActive();

    final Map<String, Long> counters;
    if (immediate) {
        immediateCounters = (immediateCounters == null) ? new HashMap<>() : immediateCounters;
        counters = immediateCounters;
    } else {
        countersOnCommit = (countersOnCommit == null) ? new HashMap<>() : countersOnCommit;
        counters = countersOnCommit;
    }

    adjustCounter(name, delta, counters);

    // Immediate counters are pushed straight through to the context as well.
    if (immediate) {
        context.adjustCounter(name, delta);
    }
}
/**
 * Returns the relationships that can currently accept FlowFiles.
 *
 * <p>A relationship is available when it has no connections at all (e.g. it is
 * auto-terminated or simply unconnected) or when none of its destination queues
 * is full (i.e. no backpressure is being applied on any of its connections).
 *
 * @return the set of relationships to which FlowFiles may be transferred right now
 */
public Set<Relationship> getAvailableRelationships() {
    final Set<Relationship> set = new HashSet<>();
    for (final Relationship relationship : getConnectable().getRelationships()) {
        final Collection<Connection> connections = getConnections(relationship);
        if (connections.isEmpty()) {
            set.add(relationship);
        } else {
            boolean available = true;
            for (final Connection connection : connections) {
                if (connection.getFlowFileQueue().isFull()) {
                    available = false;
                    break; // one full queue rules the relationship out; no need to keep scanning
                }
            }
            if (available) {
                set.add(relationship);
            }
        }
    }
    return set;
}
}
/**
 * Polls the next available FlowFile from the component's incoming connections.
 *
 * <p>Connections are visited round-robin via the context's incoming-connection
 * index, with at most one full pass over the pollable connections. Records that
 * expired while queued are removed as each connection is polled.
 *
 * @return the next FlowFile to process, or {@code null} if none is available
 */
@Override
public FlowFile get() {
    verifyTaskActive();

    final List<Connection> connections = context.getPollableConnections();
    final int connectionCount = connections.size();

    for (int attempt = 0; attempt < connectionCount; attempt++) {
        final Connection connection = connections.get(context.getNextIncomingConnectionIndex() % connectionCount);

        final Set<FlowFileRecord> expiredRecords = new HashSet<>();
        final FlowFileRecord polled = connection.poll(expiredRecords);
        removeExpired(expiredRecords, connection);

        if (polled == null) {
            continue; // this queue was empty (or held only expired records); try the next one
        }
        registerDequeuedRecord(polled, connection);
        return polled;
    }

    return null;
}
this.taskTermination = taskTermination; final Connectable connectable = context.getConnectable(); final String componentType; context.getProvenanceRepository(), this); this.sessionId = idGenerator.getAndIncrement(); this.connectableDescription = description; this.claimCache = new ContentClaimWriteCache(context.getContentRepository()); LOG.trace("Session {} created for {}", this, connectableDescription); processingStartTime = System.nanoTime();
throw new FlowFileHandlingException(record.getCurrent() + " transfer relationship not specified"); final List<Connection> destinations = new ArrayList<>(context.getConnections(relationship)); if (destinations.isEmpty() && !context.getConnectable().isAutoTerminated(relationship)) { if (relationship != Relationship.SELF) { rollback(); throw new FlowFileHandlingException(relationship + " does not have any destinations for " + context.getConnectable()); final FlowFileRecord currRec = record.getCurrent(); final StandardFlowFileRecord.Builder builder = new StandardFlowFileRecord.Builder().fromFlowFile(currRec); builder.id(context.getNextFlowFileSequence()); context.getContentRepository().incrementClaimaintCount(claim);
newClaim = context.getContentRepository().create(context.getConnectable().isLossTolerant()); claimLog.debug("Creating ContentClaim {} for 'importFrom' for {}", newClaim, destination); } catch (final IOException e) { long newSize = 0L; try { newSize = context.getContentRepository().importFrom(source, newClaim); bytesWritten += newSize; bytesRead += newSize;
final List<RepositoryRecord> expiredRecords = new ArrayList<>(flowFiles.size()); final Connectable connectable = context.getConnectable(); final String processorType = connectable.getComponentType(); final StandardProvenanceReporter expiredReporter = new StandardProvenanceReporter(this, connectable.getIdentifier(), processorType, context.getProvenanceRepository(), this); context.getProvenanceRepository().registerEvents(iterable); context.getFlowFileRepository().updateRepository(expiredRecords); } catch (final IOException e) { LOG.error("Failed to update FlowFile Repository to record expired records due to {}", e);
if (originalQueue != null) { if (penalize) { final long expirationEpochMillis = System.currentTimeMillis() + context.getConnectable().getPenalizationPeriod(TimeUnit.MILLISECONDS); final FlowFileRecord newFile = new StandardFlowFileRecord.Builder().fromFlowFile(record.getOriginal()).penaltyExpirationTime(expirationEpochMillis).build(); originalQueue.put(newFile); context.getFlowFileRepository().updateRepository(abortedRecords); } catch (final IOException ioe) { LOG.error("Unable to update FlowFile repository for aborted records due to {}", ioe.toString()); final RepositoryRecord repoRecord = new TransientClaimRepositoryRecord(transientClaims); try { context.getFlowFileRepository().updateRepository(Collections.singletonList(repoRecord)); } catch (final IOException ioe) { LOG.error("Unable to update FlowFile repository to cleanup transient claims due to {}", ioe.toString()); final Connectable connectable = context.getConnectable(); final StandardFlowFileEvent flowFileEvent = new StandardFlowFileEvent(); flowFileEvent.setBytesRead(bytesRead); context.getFlowFileEventRepository().updateRepository(flowFileEvent, connectable.getIdentifier()); } catch (final Exception e) { LOG.error("Failed to update FlowFileEvent Repository due to " + e);
final FlowFileRecord flowFile = record.getCurrent(); final long flowFileLife = System.currentTimeMillis() - flowFile.getEntryDate(); final Connectable connectable = context.getConnectable(); final Object terminator = connectable instanceof ProcessorNode ? ((ProcessorNode) connectable).getProcessor() : connectable; LOG.info("{} terminated by {}; life of FlowFile = {} ms", new Object[]{flowFile, terminator, flowFileLife}); context.getFlowFileRepository().updateRepository((Collection) repoRecords); } catch (final IOException ioe) { context.adjustCounter(entry.getKey(), entry.getValue());
/**
 * Decrements the claimant count of the given content claim, if any.
 * A {@code null} claim (FlowFile with no content) is silently ignored.
 *
 * @param claim the claim whose claimant count should be decremented; may be null
 */
private void decrementClaimCount(final ContentClaim claim) {
    if (claim != null) {
        context.getContentRepository().decrementClaimantCount(claim);
    }
}
/**
 * Records a FORK provenance relationship from {@code parent} to {@code child}.
 *
 * <p>One builder is kept per parent in {@code forkEventBuilders} so that a parent
 * forked into many children produces a single FORK event listing all children;
 * the builder is created lazily on the first child and reused afterwards.
 *
 * @param parent the FlowFile being forked
 * @param child  the newly created child FlowFile
 */
private void registerForkEvent(final FlowFile parent, final FlowFile child) {
    ProvenanceEventBuilder builder = forkEventBuilders.get(parent);
    if (builder == null) {
        final Connectable connectable = context.getConnectable();

        builder = context.getProvenanceRepository().eventBuilder();
        builder.setEventType(ProvenanceEventType.FORK);
        builder.setFlowFileEntryDate(parent.getEntryDate());
        builder.setLineageStartDate(parent.getLineageStartDate());
        builder.setFlowFileUUID(parent.getAttribute(CoreAttributes.UUID.key()));
        builder.setComponentId(connectable.getIdentifier());
        builder.setComponentType(connectable.getComponentType());
        builder.addParentFlowFile(parent);
        updateEventContentClaims(builder, parent, getRecord(parent));
        forkEventBuilders.put(parent, builder);
    }
    builder.addChildFlowFile(child);
}
final Connectable connectable = context.getConnectable(); final StandardFlowFileEvent flowFileEvent = new StandardFlowFileEvent(); flowFileEvent.setBytesRead(checkpoint.bytesRead); flowFileEvent.setCounters(counters); context.getFlowFileEventRepository().updateRepository(flowFileEvent, connectable.getIdentifier()); context.getFlowFileEventRepository().updateRepository(entry.getValue(), entry.getKey());
/**
 * Clones a sub-range of {@code example}'s content into a new FlowFile.
 *
 * <p>The clone shares the original's content claim (the claimant count is
 * incremented) and receives a fresh UUID. A full clone ({@code offset == 0}
 * and {@code size == example.getSize()}) is reported as a CLONE provenance
 * event; a partial clone is reported as a FORK.
 *
 * @param example the FlowFile whose content is cloned
 * @param offset  byte offset into the example's content at which the clone starts
 * @param size    number of bytes of content the clone covers
 * @return the newly created FlowFile
 * @throws FlowFileHandlingException if offset or size is negative, or the
 *         requested range extends past the end of the example's content
 */
@Override
public FlowFile clone(FlowFile example, final long offset, final long size) {
    verifyTaskActive();
    example = validateRecordState(example);
    final StandardRepositoryRecord exampleRepoRecord = getRecord(example);
    final FlowFileRecord currRec = exampleRepoRecord.getCurrent();
    final ContentClaim claim = exampleRepoRecord.getCurrentClaim();

    // Reject negative values explicitly: a negative offset/size could otherwise slip
    // past the range check below (e.g. offset=-5, size=3) and yield a bogus claim offset.
    if (offset < 0 || size < 0) {
        throw new FlowFileHandlingException("Specified offset of " + offset + " and size " + size + " are invalid for " + example.toString());
    }
    if (offset + size > example.getSize()) {
        throw new FlowFileHandlingException("Specified offset of " + offset + " and size " + size + " exceeds size of " + example.toString());
    }

    final StandardFlowFileRecord.Builder builder = new StandardFlowFileRecord.Builder().fromFlowFile(currRec);
    builder.id(context.getNextFlowFileSequence());
    builder.contentClaimOffset(currRec.getContentClaimOffset() + offset);
    builder.size(size);

    final String newUuid = UUID.randomUUID().toString();
    builder.addAttribute(CoreAttributes.UUID.key(), newUuid);

    final FlowFileRecord clone = builder.build();
    if (claim != null) {
        // The clone references the same content, so the claim gains one more claimant.
        context.getContentRepository().incrementClaimaintCount(claim);
    }

    final StandardRepositoryRecord record = new StandardRepositoryRecord(null);
    record.setWorking(clone, clone.getAttributes());
    records.put(clone.getId(), record);

    if (offset == 0L && size == example.getSize()) {
        provenanceReporter.clone(example, clone);
    } else {
        registerForkEvent(example, clone);
    }

    return clone;
}
if (!repositoryContext.isRelationshipAvailabilitySatisfied(requiredNumberOfAvailableRelationships)) { return InvocationResult.yield("Backpressure Applied"); shouldRun = repositoryContext.isRelationshipAvailabilitySatisfied(requiredNumberOfAvailableRelationships); procEvent.setProcessingNanos(processingNanos); procEvent.setInvocations(invocationCount); repositoryContext.getFlowFileEventRepository().updateRepository(procEvent, connectable.getIdentifier()); } catch (final IOException e) { logger.error("Unable to update FlowFileEvent Repository for {}; statistics may be inaccurate. Reason for failure: {}", connectable.getRunnableComponent(), e.toString());
/**
 * Creates a brand-new, empty FlowFile owned by this session.
 *
 * <p>The FlowFile gets a fresh UUID (also used as its initial filename), the
 * default path attribute, and a new sequence id; it is registered in this
 * session's record map and tracked as a created FlowFile for provenance.
 *
 * @return the newly created FlowFile
 */
@Override
public FlowFile create() {
    verifyTaskActive();

    final String uuid = UUID.randomUUID().toString();
    final Map<String, String> attributes = new HashMap<>();
    attributes.put(CoreAttributes.FILENAME.key(), uuid);
    attributes.put(CoreAttributes.PATH.key(), DEFAULT_FLOWFILE_PATH);
    attributes.put(CoreAttributes.UUID.key(), uuid);

    final FlowFileRecord flowFile = new StandardFlowFileRecord.Builder()
        .id(context.getNextFlowFileSequence())
        .addAttributes(attributes)
        .build();

    final StandardRepositoryRecord record = new StandardRepositoryRecord(null);
    record.setWorking(flowFile, attributes);
    records.put(flowFile.getId(), record);
    createdFlowFiles.add(flowFile.getAttribute(CoreAttributes.UUID.key()));

    return flowFile;
}
final List<Connection> connections = context.getPollableConnections(); if(connections.isEmpty()) { return Collections.emptyList();
/**
 * Drains expired FlowFiles from every incoming connection of this component.
 *
 * <p>Uses a reject-everything filter so that {@code poll} returns no live
 * FlowFiles and only surfaces records that have expired; each connection is
 * polled repeatedly until a pass yields no more expired records.
 */
public void expireFlowFiles() {
    // Reject every FlowFile: polling with this filter dequeues nothing live,
    // but still collects expired records into the supplied set.
    final FlowFileFilter rejectAll = new FlowFileFilter() {
        @Override
        public FlowFileFilterResult filter(final FlowFile flowFile) {
            return FlowFileFilterResult.REJECT_AND_CONTINUE;
        }
    };

    final Set<FlowFileRecord> expired = new HashSet<>();
    for (final Connection connection : context.getConnectable().getIncomingConnections()) {
        do {
            expired.clear();
            connection.getFlowFileQueue().poll(rejectAll, expired);
            removeExpired(expired, connection);
        } while (!expired.isEmpty());
    }
}
final String relationshipName = event.getRelationship(); final Relationship relationship = new Relationship.Builder().name(relationshipName).build(); final Collection<Connection> connectionsForRelationship = this.context.getConnections(relationship);
procEvent.setProcessingNanos(processingNanos); procEvent.setInvocations(invocationCount); context.getFlowFileEventRepository().updateRepository(procEvent, connectable.getIdentifier()); } catch (final IOException e) { logger.error("Unable to update FlowFileEvent Repository for {}; statistics may be inaccurate. Reason for failure: {}", connectable, e.toString());