/**
 * Look up the on-disk offset of a chunk by its index.
 *
 * @param chunkIndex zero-based index of the chunk.
 *
 * @return offset of the chunk in the compressed file.
 */
public long chunkOffsetBy(int chunkIndex)
{
    // Entries are consecutive fixed-width 8-byte longs.
    long entryPosition = chunkIndex * 8L;
    return offsets.getLong(entryPosition);
}
/**
 * Get a chunk offset by its index.
 *
 * Offsets are stored as consecutive 8-byte longs, so the entry for chunk
 * {@code chunkIndex} starts at byte {@code chunkIndex * 8}. No bounds
 * checking is performed; callers must supply a valid index.
 *
 * @param chunkIndex Index of the chunk.
 *
 * @return offset of the chunk in the compressed file.
 */
public long chunkOffsetBy(int chunkIndex)
{
    return offsets.getLong(chunkIndex * 8L);
}
/**
 * Return the position, within the compressed file, of the chunk at the
 * given index.
 *
 * @param chunkIndex index of the chunk to look up.
 *
 * @return offset of the chunk in the compressed file.
 */
public long chunkOffsetBy(int chunkIndex)
{
    // One 8-byte long per chunk; scale the index to a byte position.
    return offsets.getLong(8L * chunkIndex);
}
/**
 * Get a chunk offset by its index.
 *
 * Reads the {@code chunkIndex}-th 8-byte entry of the off-heap offsets
 * table; the caller is responsible for keeping the index in range.
 *
 * @param chunkIndex Index of the chunk.
 *
 * @return offset of the chunk in the compressed file.
 */
public long chunkOffsetBy(int chunkIndex)
{
    return offsets.getLong(chunkIndex * 8L);
}
/**
 * Fetch a chunk's offset from the offsets table.
 *
 * @param chunkIndex index of the chunk.
 *
 * @return offset of the chunk in the compressed file.
 */
public long chunkOffsetBy(int chunkIndex)
{
    // The table packs one long per chunk, 8 bytes apart.
    final long byteIndex = chunkIndex * 8L;
    return offsets.getLong(byteIndex);
}
/**
 * Write the completed offsets index to disk and force it to stable storage.
 *
 * Writes the header followed by one 8-byte offset per chunk, then syncs
 * the file descriptor so the index survives a crash.
 *
 * @param dataLength total uncompressed data length recorded in the header.
 * @param chunks     number of chunk offsets expected; asserted to equal {@code count}.
 *
 * @throws IOException if writing or syncing the file fails.
 */
public void close(long dataLength, int chunks) throws IOException
{
    // try-with-resources closes both streams even when construction of the
    // DataOutputStream fails — the previous version leaked the
    // FileOutputStream in that case and silently swallowed close() errors.
    try (FileOutputStream fos = new FileOutputStream(filePath);
         DataOutputStream out = new DataOutputStream(new BufferedOutputStream(fos)))
    {
        assert chunks == count;

        writeHeader(out, dataLength, chunks);
        for (int i = 0; i < count; i++)
            out.writeLong(offsets.getLong(i * 8L));

        out.flush();
        // ensure the bytes reach the device, not just the OS page cache
        fos.getFD().sync();
    }
}
public void doPrepare() { assert chunkCount == count; // finalize the size of memory used if it won't now change; // unnecessary if already correct size if (offsets.size() != count * 8L) { SafeMemory tmp = offsets; offsets = offsets.copy(count * 8L); tmp.free(); } // flush the data to disk try (FileOutputStream fos = new FileOutputStream(filePath); DataOutputStream out = new DataOutputStream(new BufferedOutputStream(fos))) { writeHeader(out, dataLength, count); for (int i = 0; i < count; i++) out.writeLong(offsets.getLong(i * 8L)); out.flush(); fos.getFD().sync(); } catch (IOException e) { throw Throwables.propagate(e); } }
/**
 * Finalize the offsets index and persist it to disk.
 *
 * First shrinks the off-heap offsets buffer to its exact final size (the
 * buffer may have been over-allocated while chunks were appended), then
 * writes the header and every chunk offset to {@code filePath}, forcing
 * the bytes to stable storage via {@code getFD().sync()}.
 */
public void doPrepare()
{
    assert chunkCount == count;

    // finalize the size of memory used if it won't now change;
    // unnecessary if already correct size
    if (offsets.size() != count * 8L)
    {
        // copy into a right-sized buffer and free the oversized original
        SafeMemory tmp = offsets;
        offsets = offsets.copy(count * 8L);
        tmp.free();
    }

    // flush the data to disk
    try (FileOutputStream fos = new FileOutputStream(filePath);
         DataOutputStream out = new DataOutputStream(new BufferedOutputStream(fos)))
    {
        writeHeader(out, dataLength, count);
        for (int i = 0; i < count; i++)
            out.writeLong(offsets.getLong(i * 8L));

        out.flush();
        // fsync so the index survives a crash, not just a flush to page cache
        fos.getFD().sync();
    }
    catch (IOException e)
    {
        // rethrow unchecked; callers do not expect a checked IOException here
        throw Throwables.propagate(e);
    }
}
public void doPrepare() { assert chunkCount == count; // finalize the size of memory used if it won't now change; // unnecessary if already correct size if (offsets.size() != count * 8L) { SafeMemory tmp = offsets; offsets = offsets.copy(count * 8L); tmp.free(); } // flush the data to disk try (FileOutputStream fos = new FileOutputStream(filePath); DataOutputStream out = new DataOutputStream(new BufferedOutputStream(fos))) { writeHeader(out, dataLength, count); for (int i = 0; i < count; i++) out.writeLong(offsets.getLong(i * 8L)); out.flush(); fos.getFD().sync(); } catch (IOException e) { throw Throwables.propagate(e); } }
/**
 * Create a {@code CompressionMetadata} view covering {@code dataLength}
 * bytes of uncompressed data.
 *
 * The returned metadata takes ownership of a shared copy of the off-heap
 * offsets, so this writer can continue to be used independently.
 */
@SuppressWarnings("resource")
public CompressionMetadata open(long dataLength, long compressedLength)
{
    // shared copy of the off-heap table; ownership passes to the returned metadata
    SafeMemory offsets = this.offsets.sharedCopy();

    // calculate how many entries we need, if our dataLength is truncated
    int count = (int) (dataLength / parameters.chunkLength());
    if (dataLength % parameters.chunkLength() != 0)
        count++;
    assert count > 0;

    // when opening at a truncated position, the entry just past the last
    // used chunk holds the actual compressed length
    if (count < this.count)
        compressedLength = offsets.getLong(count * 8L);

    return new CompressionMetadata(filePath, parameters, offsets, count * 8L, dataLength, compressedLength, ChecksumType.CRC32);
}
@SuppressWarnings("resource") public CompressionMetadata open(long dataLength, long compressedLength) { SafeMemory offsets = this.offsets.sharedCopy(); // calculate how many entries we need, if our dataLength is truncated int count = (int) (dataLength / parameters.chunkLength()); if (dataLength % parameters.chunkLength() != 0) count++; assert count > 0; // grab our actual compressed length from the next offset from our the position we're opened to if (count < this.count) compressedLength = offsets.getLong(count * 8L); return new CompressionMetadata(filePath, parameters, offsets, count * 8L, dataLength, compressedLength, ChecksumType.CRC32); }
/**
 * Open a {@code CompressionMetadata} for the given (possibly truncated)
 * uncompressed data length.
 *
 * When {@code dataLength} needs fewer chunks than have been written, the
 * supplied {@code compressedLength} is replaced by the offset of the first
 * unused chunk, i.e. the true compressed size at that truncation point.
 */
@SuppressWarnings("resource")
public CompressionMetadata open(long dataLength, long compressedLength)
{
    SafeMemory offsets = this.offsets.sharedCopy();

    // calculate how many entries we need, if our dataLength is truncated
    int count = (int) (dataLength / parameters.chunkLength());
    if (dataLength % parameters.chunkLength() != 0)
        count++;
    assert count > 0;

    // grab the actual compressed length from the offset following the
    // position we're opened to
    if (count < this.count)
        compressedLength = offsets.getLong(count * 8L);

    return new CompressionMetadata(filePath, parameters, offsets, count * 8L, dataLength, compressedLength, ChecksumType.CRC32);
}
@SuppressWarnings("resource") public CompressionMetadata open(long dataLength, long compressedLength) { SafeMemory offsets = this.offsets.sharedCopy(); // calculate how many entries we need, if our dataLength is truncated int count = (int) (dataLength / parameters.chunkLength()); if (dataLength % parameters.chunkLength() != 0) count++; assert count > 0; // grab our actual compressed length from the next offset from our the position we're opened to if (count < this.count) compressedLength = offsets.getLong(count * 8L); return new CompressionMetadata(filePath, parameters, offsets, count * 8L, dataLength, compressedLength, ChecksumType.CRC32); }
/**
 * Finalize the offsets index and write it through a Hadoop filesystem
 * channel.
 *
 * Mirrors the local-file {@code doPrepare}: right-sizes the off-heap
 * offsets buffer, then writes the header and every chunk offset to
 * {@code filePath}.
 */
public void doPrepare()
{
    assert chunkCount == count;

    // finalize the size of memory used if it won't now change;
    // unnecessary if already correct size
    if (offsets.size() != count * 8L)
    {
        SafeMemory tmp = offsets;
        offsets = offsets.copy(count * 8L);
        tmp.free();
    }

    // flush the data to disk
    try (HadoopFileUtils.HadoopFileChannel hos = HadoopFileUtils.newFilesystemChannel(filePath);
         DataOutputStreamPlus out = new BufferedDataOutputStreamPlus(hos))
    {
        writeHeader(out, dataLength, count);
        for (int i = 0; i < count; i++)
            out.writeLong(offsets.getLong(i * 8L));

        out.flush();
        // NOTE(review): unlike the FileOutputStream variant there is no fsync
        // here (the original getFD().sync() call was commented out), so
        // durability depends on the Hadoop channel's flush/close semantics —
        // confirm this is acceptable.
    }
    catch (IOException e)
    {
        throw Throwables.propagate(e);
    }
}
compressedLength = offsets.getLong(count * 8L); break;