// Legacy factory: buffer size passed directly, before option objects.
public static SequentialWriter open(File file, int bufferSize)
{
    return new SequentialWriter(file, bufferSize);
}
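// A minimal sketch of the option-based equivalent of the open(File, int) factory,
// assuming SequentialWriterOption's builder API (newBuilder()/bufferSize()/build());
// the method name openWithOption is hypothetical, introduced here for illustration.
public static SequentialWriter openWithOption(File file, int bufferSize)
{
    SequentialWriterOption option = SequentialWriterOption.newBuilder()
                                                          .bufferSize(bufferSize)
                                                          .build();
    return new SequentialWriter(file, option);
}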
// Option-based constructor: the CRC companion writer gets its own option set.
public ChecksummedSequentialWriter(File file, File crcPath, File digestFile, SequentialWriterOption option)
{
    super(file, option);
    crcWriter = new SequentialWriter(crcPath, CRC_WRITER_OPTION);
    crcMetadata = new ChecksumWriter(crcWriter);
    crcMetadata.writeChunkSize(buffer.capacity());
    this.digestFile = Optional.ofNullable(digestFile);
}
// String-path variant of the constructor above (paths passed as String rather than File).
public ChecksummedSequentialWriter(String file, String crcPath, String digestFile, SequentialWriterOption option)
{
    super(file, option);
    crcWriter = new SequentialWriter(crcPath, CRC_WRITER_OPTION);
    crcMetadata = new ChecksumWriter(crcWriter);
    crcMetadata.writeChunkSize(buffer.capacity());
    this.digestFile = Optional.ofNullable(digestFile);
}
// Legacy constructor: fixed 8 KiB CRC buffer and a raw stream, predating SequentialWriterOption.
public ChecksummedSequentialWriter(File file, int bufferSize, File crcPath)
{
    super(file, bufferSize);
    crcWriter = new SequentialWriter(crcPath, 8 * 1024);
    crcMetadata = new DataIntegrityMetadata.ChecksumWriter(crcWriter.stream);
    crcMetadata.writeChunkSize(buffer.length);
}
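// The CRC_WRITER_OPTION constant referenced above is not shown in these snippets; a
// plausible definition, assuming the SequentialWriterOption builder and mirroring the
// legacy hard-coded 8 KiB CRC buffer:
private static final SequentialWriterOption CRC_WRITER_OPTION = SequentialWriterOption.newBuilder()
                                                                                      .bufferSize(8 * 1024)
                                                                                      .build();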
// File-based variant of writeMetadata.
private void writeMetadata(Descriptor desc, Map<MetadataType, MetadataComponent> components)
{
    File file = new File(desc.filenameFor(Component.STATS));
    try (SequentialWriter out = new SequentialWriter(file, writerOption))
    {
        desc.getMetadataSerializer().serialize(components, out, desc.version);
        out.finish();
    }
    catch (IOException e)
    {
        throw new FSWriteError(e, file.getPath());
    }
}
// Migrated variant: writeMetadata now passes the String path straight through.
private void writeMetadata(Descriptor desc, Map<MetadataType, MetadataComponent> components)
{
    File file = new File(desc.filenameFor(Component.STATS));
    // previously: try (SequentialWriter out = new SequentialWriter(file, writerOption))
    try (SequentialWriter out = new SequentialWriter(desc.filenameFor(Component.STATS), writerOption))
    {
        desc.getMetadataSerializer().serialize(components, out, desc.version);
        out.finish();
    }
    catch (IOException e)
    {
        throw new FSWriteError(e, file.getPath());
    }
}
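// writerOption is likewise defined outside these snippets; a minimal assumption, if
// SequentialWriterOption exposes a DEFAULT constant, would be:
private static final SequentialWriterOption writerOption = SequentialWriterOption.DEFAULT;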
// File-based variant of IndexWriter.
IndexWriter(long keyCount)
{
    indexFile = new SequentialWriter(new File(descriptor.filenameFor(Component.PRIMARY_INDEX)), writerOption);
    builder = new FileHandle.Builder(descriptor.filenameFor(Component.PRIMARY_INDEX)).mmapped(DatabaseDescriptor.getIndexAccessMode() == Config.DiskAccessMode.mmap);
    chunkCache.ifPresent(builder::withChunkCache);
    summary = new IndexSummaryBuilder(keyCount, metadata.params.minIndexInterval, Downsampling.BASE_SAMPLING_LEVEL);
    bf = FilterFactory.getFilter(keyCount, metadata.params.bloomFilterFpChance, true, descriptor.version.hasOldBfHashOrder());
    // register listeners to be alerted when the data files are flushed
    indexFile.setPostFlushListener(() -> summary.markIndexSynced(indexFile.getLastFlushOffset()));
    dataFile.setPostFlushListener(() -> summary.markDataSynced(dataFile.getLastFlushOffset()));
}
// Migrated variant: the String path is passed directly, and the mmapped() setting is dropped.
IndexWriter(long keyCount)
{
    // previously: indexFile = new SequentialWriter(new File(descriptor.filenameFor(Component.PRIMARY_INDEX)), writerOption);
    indexFile = new SequentialWriter(descriptor.filenameFor(Component.PRIMARY_INDEX), writerOption);
    builder = new FileHandle.Builder(descriptor.filenameFor(Component.PRIMARY_INDEX));
    chunkCache.ifPresent(builder::withChunkCache);
    summary = new IndexSummaryBuilder(keyCount, metadata.params.minIndexInterval, Downsampling.BASE_SAMPLING_LEVEL);
    bf = FilterFactory.getFilter(keyCount, metadata.params.bloomFilterFpChance, true, descriptor.version.hasOldBfHashOrder());
    // register listeners to be alerted when the data files are flushed
    indexFile.setPostFlushListener(() -> summary.markIndexSynced(indexFile.getLastFlushOffset()));
    dataFile.setPostFlushListener(() -> summary.markDataSynced(dataFile.getLastFlushOffset()));
}
out = new SequentialWriter(file, WRITER_OPTION);
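// A hedged usage sketch of the pattern above, modeled on the writeMetadata snippet:
// write, then call finish() to flush and close the writer. The file name and payload
// are hypothetical, and WRITER_OPTION is assumed to be a SequentialWriterOption constant.
try (SequentialWriter out = new SequentialWriter(new File("data.bin"), WRITER_OPTION))
{
    out.writeInt(42); // SequentialWriter implements DataOutputPlus
    out.finish();     // syncs and closes the underlying channel
}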