public long capacity() { return memory.size(); }
public SafeMemory copy(long newSize)
{
    SafeMemory copy = new SafeMemory(newSize);
    // Copy the overlapping region: truncates when shrinking, leaves the
    // tail of the new allocation uninitialized when growing.
    copy.put(0, this, 0, Math.min(size(), newSize));
    return copy;
}
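// --- A minimal usage sketch (not from the original source) of the
// resize-then-free idiom that copy() supports. The grow() helper and its
// signature are illustrative assumptions, not part of the original class;
// SafeMemory is assumed to be a ref-counted off-heap buffer.
static SafeMemory grow(SafeMemory current, long newSize)
{
    SafeMemory grown = current.copy(newSize); // copies min(size(), newSize) bytes
    current.free();                           // release the old native allocation
    return grown;
}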
public void doPrepare()
{
    assert chunkCount == count;

    // finalize the size of memory used if it won't now change;
    // unnecessary if already correct size
    if (offsets.size() != count * 8L)
    {
        SafeMemory tmp = offsets;
        offsets = offsets.copy(count * 8L);
        tmp.free();
    }

    // flush the data to disk
    try (FileOutputStream fos = new FileOutputStream(filePath);
         DataOutputStream out = new DataOutputStream(new BufferedOutputStream(fos)))
    {
        writeHeader(out, dataLength, count);
        for (int i = 0; i < count; i++)
            out.writeLong(offsets.getLong(i * 8L));
        out.flush();
        fos.getFD().sync();
    }
    catch (IOException e)
    {
        throw Throwables.propagate(e);
    }
}
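// --- A hedged reader sketch (not from the original source) for the layout
// doPrepare() writes: a header followed by `count` big-endian longs, which is
// the format DataOutputStream.writeLong produces. readHeader() is a
// hypothetical counterpart to writeHeader(); java.io imports are assumed.
static long[] readOffsets(String filePath) throws IOException
{
    try (DataInputStream in = new DataInputStream(
             new BufferedInputStream(new FileInputStream(filePath))))
    {
        int count = readHeader(in);     // hypothetical: parses the header, returns count
        long[] offsets = new long[count];
        for (int i = 0; i < count; i++)
            offsets[i] = in.readLong(); // matches out.writeLong() above
        return offsets;
    }
}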
// Variant of doPrepare() that writes through a Hadoop filesystem channel
// instead of a local FileOutputStream.
public void doPrepare()
{
    assert chunkCount == count;

    // finalize the size of memory used if it won't now change;
    // unnecessary if already correct size
    if (offsets.size() != count * 8L)
    {
        SafeMemory tmp = offsets;
        offsets = offsets.copy(count * 8L);
        tmp.free();
    }

    // flush the data to disk
    try (HadoopFileUtils.HadoopFileChannel hos = HadoopFileUtils.newFilesystemChannel(filePath);
         DataOutputStreamPlus out = new BufferedDataOutputStreamPlus(hos))
    {
        writeHeader(out, dataLength, count);
        for (int i = 0; i < count; i++)
            out.writeLong(offsets.getLong(i * 8L));
        out.flush();
        // fos.getFD().sync(); -- no local file descriptor to fsync here
    }
    catch (IOException e)
    {
        throw Throwables.propagate(e);
    }
}
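// --- Durability note: the local-file version above fsyncs via
// fos.getFD().sync(), while the Hadoop variant leaves that step commented out.
// If the channel wraps Hadoop's FSDataOutputStream (an assumption;
// HadoopFileChannel's real interface may differ), the Syncable API offers an
// equivalent:
static void syncToDisk(org.apache.hadoop.fs.FSDataOutputStream out) throws IOException
{
    out.hflush(); // push buffered bytes into the datanode pipeline
    out.hsync();  // ask datanodes to sync to disk, the fsync analogue
}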