/**
 * Reports the total number of bytes written, delegating to the counting stream.
 */
@Override
public long getBytesWritten() {
    final long written = countingOut.getBytesWritten();
    return written;
}
}
/**
 * @return the number of bytes written so far, as tracked by the byte-counting stream
 */
@Override
public long getBytesWritten() {
    return countingOut.getBytesWritten();
}
/**
 * Returns the number of bytes written so far, or {@code 0} if the counting
 * stream has not yet been initialized. Synchronized to keep the null check
 * and the read consistent with concurrent (re)initialization.
 */
@Override
public synchronized long getBytesWritten() {
    if (byteCountingOut == null) {
        return 0L;
    }
    return byteCountingOut.getBytesWritten();
}
/**
 * Thread-safe view of the byte count maintained by the superclass.
 */
@Override
public synchronized long getBytesWritten() {
    return super.getBytesWritten();
}
/**
 * @return bytes written so far, or {@code 0} if no counting stream has been opened yet
 */
@Override
public long getBytesWritten() {
    return (countingOut == null) ? 0L : countingOut.getBytesWritten();
}
/**
 * Indicates whether this bin has accumulated enough records and bytes to satisfy
 * the configured minimum thresholds. The required record count is taken from the
 * first FlowFile's record-count attribute when one is configured; a missing or
 * non-numeric attribute value falls back to requiring a single record.
 */
public boolean isFullEnough() {
    readLock.lock();
    try {
        if (flowFiles.isEmpty()) {
            return false;
        }

        // Default to the configured minimum; a configured attribute overrides it below.
        int requiredRecordCount = thresholds.getMinRecords();
        final Optional<String> recordCountAttribute = thresholds.getRecordCountAttribute();
        if (recordCountAttribute.isPresent()) {
            final String recordCountValue = flowFiles.get(0).getAttribute(recordCountAttribute.get());
            try {
                requiredRecordCount = Integer.parseInt(recordCountValue);
            } catch (final NumberFormatException e) {
                // Attribute missing or non-numeric: treat a single record as sufficient.
                requiredRecordCount = 1;
            }
        }

        final boolean enoughRecords = recordCount >= requiredRecordCount;
        final boolean enoughBytes = out.getBytesWritten() >= thresholds.getMinBytes();
        return enoughRecords && enoughBytes;
    } finally {
        readLock.unlock();
    }
}
if (out.getBytesWritten() >= thresholds.getMaxBytes()) { return true;
@Override public long writeIntermediateFooter() throws IOException { // flush any buffered rows flushStripe(); // write a footer if (stripesAtLastFlush != stripes.size()) { if (callback != null) { callback.preFooterWrite(callbackContext); } int metaLength = writeMetadata(); int footLength = writeFooter(rawWriter.getBytesWritten() - metaLength); rawWriter.write(writePostScript(footLength, metaLength)); stripesAtLastFlush = stripes.size(); rawWriter.flush(); } return rawWriter.getBytesWritten(); }
/**
 * Lazily initializes and returns the raw byte-counting output stream. On first
 * call this writes the ORC magic bytes, records the header length, and sets up
 * the compressed metadata stream and its protobuf writer.
 */
@VisibleForTesting
public OutputStream getStream() throws IOException {
    if (rawWriter != null) {
        return rawWriter;
    }
    rawWriter = new ByteCountingOutputStream(flowFileOutputStream);
    rawWriter.write(OrcFile.MAGIC.getBytes());
    // Header length is the byte count right after the magic is written.
    headerLength = rawWriter.getBytesWritten();
    writer = new OutStream("metadata", bufferSize, codec, new DirectStream(rawWriter));
    protobufWriter = CodedOutputStream.newInstance(writer);
    return rawWriter;
}
/**
 * Writes the serialization header (name and version) for a new block starting at
 * the given event id, then records the block's starting offset. Fails fast if a
 * previous write already marked this writer dirty; any IOException during the
 * write marks the writer dirty and is rethrown.
 */
@Override public synchronized void writeHeader(final long firstEventId) throws IOException {
    if (isDirty()) {
        throw new IOException("Cannot update Provenance Repository because this Record Writer has already failed to write to the Repository");
    }
    try {
        // NOTE(review): this first assignment is overwritten below before any read in this
        // method; presumably resetWriteStream() (or a caller) observes the field in between
        // — TODO confirm before removing.
        blockStartOffset = rawOutStream.getBytesWritten();
        resetWriteStream(firstEventId);
        // Serialization name + version precede the subclass-specific header content.
        out.writeUTF(getSerializationName());
        out.writeInt(getSerializationVersion());
        writeHeader(firstEventId, out);
        out.flush();
        // Final block start offset is measured after the header has been flushed.
        blockStartOffset = getBytesWritten();
    } catch (final IOException ioe) {
        markDirty();
        throw ioe;
    }
}
/**
 * Serializes the accumulated per-stripe statistics as the ORC metadata section.
 *
 * @return the number of raw bytes the metadata section occupied
 */
private int writeMetadata() throws IOException {
    getStream(); // make sure the underlying streams are initialized
    final OrcProto.Metadata.Builder metadataBuilder = OrcProto.Metadata.newBuilder();
    for (final OrcProto.StripeStatistics.Builder stripeStats : treeWriter.stripeStatsBuilders) {
        metadataBuilder.addStripeStats(stripeStats.build());
    }
    final long startPosition = rawWriter.getBytesWritten();
    metadataBuilder.build().writeTo(protobufWriter);
    protobufWriter.flush();
    writer.flush();
    // Section length is the raw-byte delta across the write.
    return (int) (rawWriter.getBytesWritten() - startPosition);
}
final long byteOffset = (byteCountingOut == null) ? rawOutStream.getBytesWritten() : byteCountingOut.getBytesWritten(); final TocWriter tocWriter = getTocWriter(); tocWriter.addBlockOffset(rawOutStream.getBytesWritten(), eventId); } else { if (tocWriter != null && eventId != null) { tocWriter.addBlockOffset(rawOutStream.getBytesWritten(), eventId);
/**
 * Concatenates the content of the given claims into the destination claim,
 * optionally preceded by a header, followed by a footer, and separated by a
 * demarcator between consecutive claims.
 *
 * @return the total number of bytes written to the destination
 * @throws IllegalArgumentException if the destination is one of the source claims
 */
@Override
public long merge(final Collection<ContentClaim> claims, final ContentClaim destination, final byte[] header, final byte[] footer, final byte[] demarcator) throws IOException {
    if (claims.contains(destination)) {
        throw new IllegalArgumentException("destination cannot be within claims");
    }

    try (final ByteCountingOutputStream out = new ByteCountingOutputStream(write(destination))) {
        if (header != null) {
            out.write(header);
        }

        // Write each claim; a demarcator goes between claims, never after the last.
        int remaining = claims.size();
        for (final ContentClaim claim : claims) {
            try (final InputStream in = read(claim)) {
                StreamUtils.copy(in, out);
            }
            remaining--;
            if (remaining > 0 && demarcator != null) {
                out.write(demarcator);
            }
        }

        if (footer != null) {
            out.write(footer);
        }
        return out.getBytesWritten();
    }
}
/**
 * Serializes a single provenance event, assigning an id when the record does not
 * already carry one, and returns a summary of where and how it was stored. Fails
 * fast if the writer is already dirty; an IOException during the write marks the
 * writer dirty and is rethrown.
 */
@Override
public synchronized StorageSummary writeRecord(final ProvenanceEventRecord record) throws IOException {
    if (isDirty()) {
        throw new IOException("Cannot update Provenance Repository because this Record Writer has already failed to write to the Repository");
    }

    try {
        // -1 means "no id assigned yet": draw the next one from the generator.
        final long eventId = record.getEventId();
        final long recordIdentifier = (eventId == -1L) ? idGenerator.getAndIncrement() : eventId;

        final long startBytes = byteCountingOut.getBytesWritten();
        ensureStreamState(recordIdentifier, startBytes);
        writeRecord(record, recordIdentifier, out);
        recordCount++;

        final long endBytes = byteCountingOut.getBytesWritten();
        final long serializedLength = endBytes - startBytes;

        final TocWriter tocWriter = getTocWriter();
        final Integer blockIndex = (tocWriter == null) ? null : tocWriter.getCurrentBlockIndex();
        return new StorageSummary(recordIdentifier, getStorageLocation(), blockIndex, serializedLength, endBytes);
    } catch (final IOException ioe) {
        markDirty();
        throw ioe;
    }
}
/**
 * Closes this session-managed output stream exactly once: records the byte count
 * against the session, unregisters the stream, flushes, releases the temporary
 * claim, and installs an updated FlowFile record pointing at the written content.
 */
@Override public void close() throws IOException {
    // Idempotent: subsequent calls are no-ops.
    if (closed) {
        return;
    }
    closed = true;
    writeRecursionSet.remove(sourceFlowFile);
    final long bytesWritten = countingOut.getBytesWritten();
    // Accumulate into the enclosing session's running byte total.
    StandardProcessSession.this.bytesWritten += bytesWritten;
    final OutputStream removed = openOutputStreams.remove(sourceFlowFile);
    if (removed == null) {
        LOG.error("Closed Session's OutputStream but there was no entry for it in the map; sourceFlowFile={}; map={}", sourceFlowFile, openOutputStreams);
    }
    flush();
    removeTemporaryClaim(record);
    // NOTE(review): offset math assumes this write was appended at the end of
    // updatedClaim; Math.max guards against a negative offset — TODO confirm.
    final FlowFileRecord newFile = new StandardFlowFileRecord.Builder()
        .fromFlowFile(record.getCurrent())
        .contentClaim(updatedClaim)
        .contentClaimOffset(Math.max(0, updatedClaim.getLength() - bytesWritten))
        .size(bytesWritten)
        .build();
    record.setWorking(newFile);
}
};
final long blockStartOffset = byteCountingOut.getBytesWritten();
long start = rawWriter.getBytesWritten(); long stripeLen = length; long availBlockSpace = blockSize - (start % blockSize);
private int writePostScript(int footerLength, int metadataLength) throws IOException { OrcProto.PostScript.Builder builder = OrcProto.PostScript.newBuilder() .setCompression(writeCompressionKind(compress)) .setFooterLength(footerLength) .setMetadataLength(metadataLength) .setMagic(OrcFile.MAGIC) .addVersion(version.getMajor()) .addVersion(version.getMinor()) .setWriterVersion(OrcFile.WriterVersion.HIVE_8732.getId()); if (compress != CompressionKind.NONE) { builder.setCompressionBlockSize(bufferSize); } OrcProto.PostScript ps = builder.build(); // need to write this uncompressed long startPosn = rawWriter.getBytesWritten(); ps.writeTo(rawWriter); long length = rawWriter.getBytesWritten() - startPosn; if (length > 255) { throw new IllegalArgumentException("PostScript too large at " + length); } return (int) length; }
private int writeFooter(long bodyLength) throws IOException { getStream(); OrcProto.Footer.Builder builder = OrcProto.Footer.newBuilder(); builder.setContentLength(bodyLength); builder.setHeaderLength(headerLength); builder.setNumberOfRows(rowCount); builder.setRowIndexStride(rowIndexStride); // populate raw data size rawDataSize = computeRawDataSize(); // serialize the types writeTypes(builder, treeWriter); // add the stripe information for (OrcProto.StripeInformation stripe : stripes) { builder.addStripes(stripe); } // add the column statistics writeFileStatistics(builder, treeWriter); // add all of the user metadata for (Map.Entry<String, ByteString> entry : userMetadata.entrySet()) { builder.addMetadata(OrcProto.UserMetadataItem.newBuilder() .setName(entry.getKey()).setValue(entry.getValue())); } long startPosn = rawWriter.getBytesWritten(); OrcProto.Footer footer = builder.build(); footer.writeTo(protobufWriter); protobufWriter.flush(); writer.flush(); return (int) (rawWriter.getBytesWritten() - startPosn); }
@Override public void close() throws IOException { if (callback != null) { callback.preFooterWrite(callbackContext); } // remove us from the memory manager so that we don't get any callbacks memoryManager.removeWriter(path); // actually close the file flushStripe(); int metadataLength = writeMetadata(); int footerLength = writeFooter(rawWriter.getBytesWritten() - metadataLength); rawWriter.write(writePostScript(footerLength, metadataLength)); rawWriter.close(); }