/**
 * Resizes the backing off-heap buffer to {@code newCapacity} bytes.
 * No-op when the requested capacity already matches; otherwise the current
 * contents are copied into a fresh allocation and the old one is released.
 */
public void setCapacity(long newCapacity)
{
    if (newCapacity == capacity())
        return;

    // Publish the resized copy before releasing the old allocation.
    SafeMemory previous = buffer;
    buffer = previous.copy(newCapacity);
    previous.free();
}
public void doPrepare() { assert chunkCount == count; // finalize the size of memory used if it won't now change; // unnecessary if already correct size if (offsets.size() != count * 8L) { SafeMemory tmp = offsets; offsets = offsets.copy(count * 8L); tmp.free(); } // flush the data to disk try (FileOutputStream fos = new FileOutputStream(filePath); DataOutputStream out = new DataOutputStream(new BufferedOutputStream(fos))) { writeHeader(out, dataLength, count); for (int i = 0; i < count; i++) out.writeLong(offsets.getLong(i * 8L)); out.flush(); fos.getFD().sync(); } catch (IOException e) { throw Throwables.propagate(e); } }
public void doPrepare() { assert chunkCount == count; // finalize the size of memory used if it won't now change; // unnecessary if already correct size if (offsets.size() != count * 8L) { SafeMemory tmp = offsets; offsets = offsets.copy(count * 8L); tmp.free(); } // flush the data to disk try (FileOutputStream fos = new FileOutputStream(filePath); DataOutputStream out = new DataOutputStream(new BufferedOutputStream(fos))) { writeHeader(out, dataLength, count); for (int i = 0; i < count; i++) out.writeLong(offsets.getLong(i * 8L)); out.flush(); fos.getFD().sync(); } catch (IOException e) { throw Throwables.propagate(e); } }
public void doPrepare() { assert chunkCount == count; // finalize the size of memory used if it won't now change; // unnecessary if already correct size if (offsets.size() != count * 8L) { SafeMemory tmp = offsets; offsets = offsets.copy(count * 8L); tmp.free(); } // flush the data to disk try (FileOutputStream fos = new FileOutputStream(filePath); DataOutputStream out = new DataOutputStream(new BufferedOutputStream(fos))) { writeHeader(out, dataLength, count); for (int i = 0; i < count; i++) out.writeLong(offsets.getLong(i * 8L)); out.flush(); fos.getFD().sync(); } catch (IOException e) { throw Throwables.propagate(e); } }
/**
 * Reallocates the off-heap memory to hold {@code count} more bytes.
 * No-op when the computed capacity already matches; otherwise the contents
 * are copied into a fresh allocation, the tail buffer is rebuilt, and the
 * logical write position and byte order are carried over.
 */
@Override
protected void reallocate(long count)
{
    long newCapacity = calculateNewSize(count);
    if (newCapacity == capacity())
        return;

    // Both the position and the byte order live on the tail buffer we are
    // about to replace, so capture them first.
    long writePosition = length();
    ByteOrder byteOrder = buffer.order();

    SafeMemory previous = memory;
    memory = previous.copy(newCapacity);
    buffer = tailBuffer(memory);
    buffer.position((int) (writePosition - tailOffset(memory)));
    buffer.order(byteOrder);
    previous.free();
}
/**
 * Grows (or shrinks) the backing memory to fit {@code count} additional
 * bytes, preserving the current write position and byte order across the
 * reallocation. Does nothing when the size is already correct.
 */
@Override
protected void reallocate(long count)
{
    final long targetCapacity = calculateNewSize(count);
    if (targetCapacity != capacity())
    {
        // Snapshot state held by the tail buffer before swapping it out.
        final long savedPosition = length();
        final ByteOrder savedOrder = buffer.order();

        SafeMemory old = memory;
        memory = old.copy(targetCapacity);
        buffer = tailBuffer(memory);

        int restoredPosition = (int) (savedPosition - tailOffset(memory));
        buffer.position(restoredPosition);
        buffer.order(savedOrder);

        old.free();
    }
}
/**
 * Ensures capacity for {@code count} further bytes by copying the off-heap
 * region into a new allocation of the computed size. The tail buffer is
 * re-derived from the new memory and restored to the prior write position
 * and byte order; the old allocation is then released.
 */
@Override
protected void reallocate(long count)
{
    long resized = calculateNewSize(count);
    if (resized == capacity())
        return; // already the right size — nothing to do

    long pos = length();
    ByteOrder order = buffer.order();

    SafeMemory stale = memory;
    memory = stale.copy(resized);
    buffer = tailBuffer(memory);

    // Re-express the absolute position relative to the new tail offset.
    buffer.position((int) (pos - tailOffset(memory)));
    buffer.order(order);

    stale.free();
}
/**
 * Resizes the off-heap region so {@code count} more bytes fit. When the
 * computed capacity differs from the current one, the memory is copied,
 * the tail buffer rebuilt, and the saved position/byte-order reapplied
 * before the previous allocation is freed.
 */
@Override
protected void reallocate(long count)
{
    long desired = calculateNewSize(count);
    boolean needsResize = desired != capacity();
    if (!needsResize)
        return;

    // Capture tail-buffer state that the swap below would otherwise lose.
    long absolutePosition = length();
    ByteOrder endianness = buffer.order();

    SafeMemory replaced = memory;
    memory = replaced.copy(desired);
    buffer = tailBuffer(memory);

    int relativePosition = (int) (absolutePosition - tailOffset(memory));
    buffer.position(relativePosition);
    buffer.order(endianness);
    replaced.free();
}
public void doPrepare() { assert chunkCount == count; // finalize the size of memory used if it won't now change; // unnecessary if already correct size if (offsets.size() != count * 8L) { SafeMemory tmp = offsets; offsets = offsets.copy(count * 8L); tmp.free(); } // flush the data to disk try (HadoopFileUtils.HadoopFileChannel hos = HadoopFileUtils.newFilesystemChannel(filePath); DataOutputStreamPlus out = new BufferedDataOutputStreamPlus(hos)) { writeHeader(out, dataLength, count); for (int i = 0; i < count; i++) out.writeLong(offsets.getLong(i * 8L)); out.flush(); //fos.getFD().sync(); } catch (IOException e) { throw Throwables.propagate(e); } }
// Release the current off-heap offsets region and adopt the replacement.
// NOTE(review): this frees the live allocation *before* publishing tmp —
// the opposite order from the copy-publish-free pattern used in doPrepare.
// If this.offsets could be observed concurrently (or if the assignment did
// not immediately follow), a reader would briefly see freed memory; also
// unclear from this fragment where tmp was allocated — confirm at the caller.
this.offsets.free(); this.offsets = tmp;