/**
 * Decrements the claimant count for the given content claim.
 * A {@code null} claim is silently ignored.
 *
 * @param claim the claim whose claimant count is to be decremented, possibly {@code null}
 */
private void decrementClaimCount(final ContentClaim claim) {
    if (claim != null) {
        context.getContentRepository().decrementClaimantCount(claim);
    }
}
/** * Destroys a ContentClaim that was being written to but is no longer needed * * @param claim claim to destroy */ private void destroyContent(final ContentClaim claim) { if (claim == null) { return; } final int decrementedClaimCount = context.getContentRepository().decrementClaimantCount(claim); if (decrementedClaimCount <= 0) { resetWriteClaims(); // Have to ensure that we are not currently writing to the claim before we can destroy it. context.getContentRepository().remove(claim); } }
// Fragment of the session constructor (enclosing definition not visible in this view).
// Assigns a unique, monotonically increasing session id, stores the connectable
// description, creates the per-session content-claim write cache backed by the
// content repository, and records the processing start time in nanoseconds.
this.sessionId = idGenerator.getAndIncrement(); this.connectableDescription = description; this.claimCache = new ContentClaimWriteCache(context.getContentRepository()); LOG.trace("Session {} created for {}", this, connectableDescription); processingStartTime = System.nanoTime();
// Fragment (enclosing method not fully visible in this view): creates a new ContentClaim
// whose loss tolerance matches the Connectable's setting, then imports content from
// 'source' into it, counting the imported bytes as both bytes written and bytes read.
// NOTE(review): this view appears to fuse two separate snippets — the
// 'catch (final IOException e)' handler body is truncated and belongs to surrounding
// code that is not shown here; do not treat this as one contiguous statement sequence.
newClaim = context.getContentRepository().create(context.getConnectable().isLossTolerant()); claimLog.debug("Creating ContentClaim {} for 'importFrom' for {}", newClaim, destination); } catch (final IOException e) { long newSize = 0L; try { newSize = context.getContentRepository().importFrom(source, newClaim); bytesWritten += newSize; bytesRead += newSize;
// Fragment (enclosing method not fully visible): obtains a raw InputStream over the
// claim's content. In the else-branch the claim write cache is flushed first so that
// any buffered, not-yet-persisted writes are visible to the read, then the stream is
// advanced past 'offset' bytes via StreamUtils.skip.
// NOTE(review): the leading read() call appears to belong to a different branch that
// is cut off by this view — confirm against the full method before relying on it.
final InputStream rawInStream = context.getContentRepository().read(claim); } else { claimCache.flush(claim); final InputStream rawInStream = context.getContentRepository().read(claim); try { StreamUtils.skip(rawInStream, offset);
// Fragment (enclosing method not fully visible): creates a new ContentClaim whose loss
// tolerance matches the Connectable, imports data from a task-termination-aware wrapper
// around 'source' into the claim, and adds the imported size to the bytes-written tally.
// The IOException handler's body is truncated by this view.
try { try { newClaim = context.getContentRepository().create(context.getConnectable().isLossTolerant()); claimLog.debug("Creating ContentClaim {} for 'importFrom' for {}", newClaim, destination); newSize = context.getContentRepository().importFrom(createTaskTerminationStream(source), newClaim); bytesWritten += newSize; } catch (final IOException e) {
// Fragment: increments the claimant count for 'claim' in the content repository.
// NOTE: 'incrementClaimaintCount' (sic) is the repository interface's actual method
// name — do not "fix" the spelling here without changing the interface it calls.
context.getContentRepository().incrementClaimaintCount(claim);
// Fragment: caches the content repository in a local before creating a new claim.
// The 'try' block's body continues beyond this view and is not shown here.
final ContentRepository contentRepo = context.getContentRepository(); final ContentClaim newClaim; try {
// Fragment: releases one claimant reference on the record's working claim and then
// registers that claim as transient on the record.
// NOTE(review): presumably transient claims are cleaned up when the session
// completes — inferred from the method name; confirm against StandardRepositoryRecord.
context.getContentRepository().decrementClaimantCount(record.getWorkingClaim()); record.addTransientClaim(record.getWorkingClaim());
/**
 * Exports the content of the given FlowFile to the supplied destination path,
 * optionally appending to an existing file. The number of bytes copied is added
 * to both the session's bytes-read and bytes-written counters.
 *
 * @param source      the FlowFile whose content is to be exported
 * @param destination the path to export the content to
 * @param append      whether to append to the destination rather than replace it
 * @throws FlowFileAccessException if the export fails for any reason other than
 *         the content being missing from the repository
 */
@Override
public void exportTo(FlowFile source, final Path destination, final boolean append) {
    verifyTaskActive();
    source = validateRecordState(source);
    final StandardRepositoryRecord record = getRecord(source);

    try {
        // The claim must not be mid-append, and any cached writes must be flushed,
        // before its content can be safely read for export.
        ensureNotAppending(record.getCurrentClaim());
        claimCache.flush(record.getCurrentClaim());

        final long copyCount = context.getContentRepository().exportTo(
            record.getCurrentClaim(), destination, append, record.getCurrentClaimOffset(), source.getSize());
        bytesRead += copyCount;
        bytesWritten += copyCount;
    } catch (final ContentNotFoundException nfe) {
        handleContentNotFound(nfe, record);
    } catch (final Throwable t) {
        throw new FlowFileAccessException("Failed to export " + source + " to " + destination + " due to " + t.toString(), t);
    }
}
/**
 * Creates a clone of the given FlowFile whose content is the sub-range
 * [offset, offset + size) of the example's content. The clone shares the
 * example's content claim, so the claim's claimant count is incremented.
 *
 * @param example the FlowFile to clone
 * @param offset  the content offset, relative to the example's own claim offset
 * @param size    the number of bytes of content the clone should reference
 * @return the newly created clone
 * @throws FlowFileHandlingException if offset + size exceeds the example's size
 */
@Override
public FlowFile clone(FlowFile example, final long offset, final long size) {
    verifyTaskActive();

    example = validateRecordState(example);
    final StandardRepositoryRecord exampleRecord = getRecord(example);
    final FlowFileRecord currentFlowFile = exampleRecord.getCurrent();
    final ContentClaim claim = exampleRecord.getCurrentClaim();

    // The requested range must fall entirely within the example's content.
    if (offset + size > example.getSize()) {
        throw new FlowFileHandlingException("Specified offset of " + offset + " and size " + size + " exceeds size of " + example.toString());
    }

    final StandardFlowFileRecord.Builder cloneBuilder = new StandardFlowFileRecord.Builder().fromFlowFile(currentFlowFile);
    cloneBuilder.id(context.getNextFlowFileSequence());
    cloneBuilder.contentClaimOffset(currentFlowFile.getContentClaimOffset() + offset);
    cloneBuilder.size(size);

    // Every FlowFile must carry its own unique UUID attribute.
    final String cloneUuid = UUID.randomUUID().toString();
    cloneBuilder.addAttribute(CoreAttributes.UUID.key(), cloneUuid);

    final FlowFileRecord clone = cloneBuilder.build();
    if (claim != null) {
        // The clone references the same content claim as the original, so bump the
        // claimant count. ('incrementClaimaintCount' (sic) is the repository API name.)
        context.getContentRepository().incrementClaimaintCount(claim);
    }

    final StandardRepositoryRecord record = new StandardRepositoryRecord(null);
    record.setWorking(clone, clone.getAttributes());
    records.put(clone.getId(), record);

    if (offset == 0L && size == example.getSize()) {
        // A full-content clone is reported as a CLONE provenance event...
        provenanceReporter.clone(example, clone);
    } else {
        // ...whereas a partial-content clone is reported as a FORK.
        registerForkEvent(example, clone);
    }

    return clone;
}
// Fragment of an 'append' implementation (enclosing method not visible): creates a new
// ContentClaim whose loss tolerance matches the Connectable, opens a raw output stream
// to it, then layers a BufferedOutputStream and a ByteCountingOutputStream on top so
// that writes are buffered and the number of bytes written can be tracked.
newClaim = context.getContentRepository().create(context.getConnectable().isLossTolerant()); claimLog.debug("Creating ContentClaim {} for 'append' for {}", newClaim, source); final OutputStream rawOutStream = context.getContentRepository().write(newClaim); final OutputStream bufferedOutStream = new BufferedOutputStream(rawOutStream); outStream = new ByteCountingOutputStream(bufferedOutStream);