public SafeMemory copy(long newSize)
{
    SafeMemory copy = new SafeMemory(newSize);
    copy.put(0, this, 0, Math.min(size(), newSize));
    return copy;
}
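// Illustrative note (not from the original source): taking Math.min(size(), newSize)
// makes copy() safe in both directions. Shrinking drops the tail; growing copies
// everything and leaves bytes past the old size unspecified until written.
// doPrepare() below relies on the shrinking case to trim offsets to exactly count * 8 bytes.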
/**
 * Get a chunk offset by its index.
 *
 * @param chunkIndex Index of the chunk.
 *
 * @return offset of the chunk in the compressed file.
 */
public long chunkOffsetBy(int chunkIndex)
{
    return offsets.getLong(chunkIndex * 8L);
}
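// Hedged usage sketch (assumption, not part of the original source): because each
// offset is stored as an 8-byte long, chunk i lives at byte i * 8 of the offsets
// memory, so mapping an uncompressed position to its chunk's compressed offset is
// one division and one lookup. `metadata` and the 64 KiB chunk length are hypothetical.
private static long exampleChunkOffset(CompressionMetadata metadata, long uncompressedPosition)
{
    long chunkLength = 65536;                                       // assumed chunk length
    int chunkIndex = (int) (uncompressedPosition / chunkLength);    // e.g. 200_000 / 65_536 = 3
    return metadata.chunkOffsetBy(chunkIndex);                      // reads the long at chunkIndex * 8
}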
public void doPrepare()
{
    assert chunkCount == count;

    // finalize the size of memory used if it won't now change;
    // unnecessary if already correct size
    if (offsets.size() != count * 8L)
    {
        SafeMemory tmp = offsets;
        offsets = offsets.copy(count * 8L);
        tmp.free();
    }

    // flush the data to disk
    try (FileOutputStream fos = new FileOutputStream(filePath);
         DataOutputStream out = new DataOutputStream(new BufferedOutputStream(fos)))
    {
        writeHeader(out, dataLength, count);
        for (int i = 0; i < count; i++)
            out.writeLong(offsets.getLong(i * 8L));

        out.flush();
        fos.getFD().sync();
    }
    catch (IOException e)
    {
        throw Throwables.propagate(e);
    }
}
@SuppressWarnings("resource") public CompressionMetadata open(long dataLength, long compressedLength) { SafeMemory offsets = this.offsets.sharedCopy(); // calculate how many entries we need, if our dataLength is truncated int count = (int) (dataLength / parameters.chunkLength()); if (dataLength % parameters.chunkLength() != 0) count++; assert count > 0; // grab our actual compressed length from the next offset from our the position we're opened to if (count < this.count) compressedLength = offsets.getLong(count * 8L); return new CompressionMetadata(filePath, parameters, offsets, count * 8L, dataLength, compressedLength, ChecksumType.CRC32); }
@SuppressWarnings("resource") public SafeMemoryWriter(long initialCapacity) { this(new SafeMemory(initialCapacity)); }
protected Throwable doPostCleanup(Throwable failed)
{
    return offsets.close(failed);
}
public long capacity()
{
    return memory.size();
}
public IndexSummary build(IPartitioner partitioner, ReadableBoundary boundary)
{
    assert entries.length() > 0;

    // each summary entry in offsets is a 4-byte int pointing into entries
    int count = (int) (offsets.length() / 4);
    long entriesLength = entries.length();
    if (boundary != null)
    {
        count = boundary.summaryCount;
        entriesLength = boundary.entriesLength;
    }

    int sizeAtFullSampling = (int) Math.ceil(keysWritten / (double) minIndexInterval);
    assert count > 0;
    return new IndexSummary(partitioner, offsets.currentBuffer().sharedCopy(), count,
                            entries.currentBuffer().sharedCopy(), entriesLength,
                            sizeAtFullSampling, minIndexInterval, samplingLevel);
}
public void add(Memory memory)
{
    if (memory instanceof SafeMemory)
        ((SafeMemory) memory).addTo(this);
}
public void writeLong(long val)
{
    if (order != ByteOrder.nativeOrder())
        val = Long.reverseBytes(val);

    long newLength = ensureCapacity(8);
    buffer.setLong(length, val);
    length = newLength;
}
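// Illustrative note (assumption about intent, not from the original source): the
// backing memory is written in native byte order, so a writer configured for the
// opposite order must swap bytes before storing. For example,
// Long.reverseBytes(0x0102030405060708L) == 0x0807060504030201L, which reads back
// as the original value under the opposite endianness.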
public void write(Memory memory)
{
    long newLength = ensureCapacity(memory.size());
    buffer.put(length, memory, 0, memory.size());
    length = newLength;
}
@Override
protected void reallocate(long count)
{
    long newCapacity = calculateNewSize(count);
    if (newCapacity != capacity())
    {
        // preserve the current write position and byte order across the move
        long position = length();
        ByteOrder order = buffer.order();

        SafeMemory oldBuffer = memory;
        memory = this.memory.copy(newCapacity);
        buffer = tailBuffer(memory);

        int newPosition = (int) (position - tailOffset(memory));
        buffer.position(newPosition);
        buffer.order(order);

        oldBuffer.free();
    }
}
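// Minimal usage sketch (hedged; the capacity and growth policy below are assumptions,
// the exact sizing lives in calculateNewSize()). The invariant reallocate() preserves
// is that already-written bytes survive the move to the larger allocation:
//
//     SafeMemoryWriter writer = new SafeMemoryWriter(16);  // deliberately tiny
//     writer.writeLong(1L);
//     writer.writeLong(2L);
//     writer.writeLong(3L);   // third write exceeds 16 bytes and forces a reallocation
//     assert writer.length() == 24;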
@SuppressWarnings("resource") public CompressionMetadata open(long dataLength, long compressedLength) { SafeMemory offsets = this.offsets.sharedCopy(); // calculate how many entries we need, if our dataLength is truncated int count = (int) (dataLength / parameters.chunkLength()); if (dataLength % parameters.chunkLength() != 0) count++; assert count > 0; // grab our actual compressed length from the next offset from our the position we're opened to if (count < this.count) compressedLength = offsets.getLong(count * 8L); return new CompressionMetadata(filePath, parameters, offsets, count * 8L, dataLength, compressedLength, ChecksumType.CRC32); }
@SuppressWarnings("resource") public SafeMemoryWriter(long initialCapacity) { this(new SafeMemory(initialCapacity)); }
public void abort()
{
    if (offsets != null)
        offsets.close();
}