public static SequentialWriter open(File file, int bufferSize)
{
    return new SequentialWriter(file, bufferSize);
}
protected void flushInternal()
{
    if (isDirty)
    {
        flushData();

        if (trickleFsync)
        {
            bytesSinceTrickleFsync += validBufferBytes;
            if (bytesSinceTrickleFsync >= trickleFsyncByteInterval)
            {
                syncDataOnlyInternal();
                bytesSinceTrickleFsync = 0;
            }
        }

        // Remember that we wrote, so we don't write it again on next flush().
        resetBuffer();

        isDirty = false;
    }
}
protected void syncInternal()
{
    doFlush(0);
    syncDataOnlyInternal();
}
protected void reBuffer()
{
    flushInternal();
    resetBuffer();
}
public void write(int value) throws ClosedChannelException
{
    if (current >= bufferOffset + buffer.length)
        reBuffer();

    assert current < bufferOffset + buffer.length
           : String.format("File (%s) offset %d, buffer offset %d.", getPath(), current, bufferOffset);

    buffer[bufferCursor()] = (byte) value;

    validBufferBytes += 1;
    current += 1;
    isDirty = true;
    syncNeeded = true;
}
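For orientation, here is a minimal caller-side sketch of the lifecycle these methods implement. It uses only calls that appear elsewhere in this section — the open(File, int) factory, write(int), sync() and close() — and the file argument is whatever the caller supplies:

// Sketch only: exercises the SequentialWriter calls shown in this section.
static void writeExample(File file) throws IOException
{
    SequentialWriter writer = SequentialWriter.open(file, 64 * 1024);
    try
    {
        for (int i = 0; i < 256; i++)
            writer.write(i);   // buffered; reBuffer() flushes once the buffer fills
        writer.sync();         // flush buffered bytes and fsync the file
    }
    finally
    {
        writer.close();
    }
}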
out = new SequentialWriter(file, WRITER_OPTION);

// header
out.writeUTF(descriptor.version.toString());
out.writeShort(termSize.size);
ByteBufferUtil.writeWithShortLength(range.right, out);
out.writeUTF(mode.toString());
out.writeBoolean(marksPartials);

// pad the header out to a full block
out.skipBytes((int) (BLOCK_SIZE - out.position()));

final long levelIndexPosition = out.position();

// level index: level count followed by per-level metadata
out.writeInt(levels.size());
for (int i = levels.size() - 1; i >= 0; i--)
    levels.get(i).flushMetadata();

// trailing pointer back to the start of the level index
out.writeLong(levelIndexPosition);
out.sync();
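A hedged reader-side sketch, not code from this section: since writeLong(levelIndexPosition) is the last field written before sync(), the final 8 bytes of the file point back at the level index, so a reader could locate it like this (file is a stand-in for the index file):

// Sketch only: locating the level index via the trailing pointer written above.
try (RandomAccessFile in = new RandomAccessFile(file, "r"))
{
    in.seek(in.length() - Long.BYTES);       // the pointer is the last 8 bytes
    long levelIndexPosition = in.readLong();
    in.seek(levelIndexPosition);
    int levelCount = in.readInt();           // level count is written first
}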
public void resetAndTruncate(FileMark mark)
{
    assert mark instanceof BufferedFileWriterMark;

    long previous = current;
    current = ((BufferedFileWriterMark) mark).pointer;

    if (previous - current <= validBufferBytes) // current buffer
    {
        validBufferBytes = validBufferBytes - ((int) (previous - current));
        return;
    }

    // synchronize current buffer with disk
    // because we don't want any data loss
    syncInternal();

    // truncate file to given position
    truncate(current);

    // reset channel position
    try
    {
        out.seek(current);
    }
    catch (IOException e)
    {
        throw new FSReadError(e, getPath());
    }

    resetBuffer();
}
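A hedged sketch of how mark() (shown later in this section) pairs with resetAndTruncate() to roll back a partial write; serializeEntry and entry are hypothetical stand-ins:

// Sketch only: roll the writer back if serialization fails midway.
FileMark mark = writer.mark();      // capture the current position
try
{
    serializeEntry(writer, entry);  // hypothetical serialization step
}
catch (RuntimeException e)
{
    writer.resetAndTruncate(mark);  // drop the partially written entry
    throw e;
}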
IndexWriter(long keyCount)
{
    indexFile = new SequentialWriter(descriptor.filenameFor(Component.PRIMARY_INDEX), writerOption);
    builder = new FileHandle.Builder(descriptor.filenameFor(Component.PRIMARY_INDEX));
    chunkCache.ifPresent(builder::withChunkCache);
    summary = new IndexSummaryBuilder(keyCount, metadata.params.minIndexInterval, Downsampling.BASE_SAMPLING_LEVEL);
    bf = FilterFactory.getFilter(keyCount, metadata.params.bloomFilterFpChance, true, descriptor.version.hasOldBfHashOrder());

    // register listeners to be alerted when the data files are flushed
    indexFile.setPostFlushListener(() -> summary.markIndexSynced(indexFile.getLastFlushOffset()));
    dataFile.setPostFlushListener(() -> summary.markDataSynced(dataFile.getLastFlushOffset()));
}
protected void doPrepare()
{
    flushBf();

    // truncate index file
    long position = indexFile.position();
    indexFile.prepareToCommit();
    FileUtils.truncate(indexFile.getPath(), position);

    // save summary
    summary.prepareToCommit();
    try (IndexSummary indexSummary = summary.build(getPartitioner()))
    {
        SSTableReader.saveSummary(descriptor, first, last, indexSummary);
    }
}
public void truncate(long toSize)
{
    try
    {
        fchannel.truncate(toSize);
        lastFlushOffset = toSize;
    }
    catch (IOException e)
    {
        throw new FSWriteError(e, getPath());
    }
}
private void writeMetadata(Descriptor desc, Map<MetadataType, MetadataComponent> components)
{
    File file = new File(desc.filenameFor(Component.STATS));
    try (SequentialWriter out = new SequentialWriter(file, writerOption))
    {
        desc.getMetadataSerializer().serialize(components, out, desc.version);
        out.finish();
    }
    catch (IOException e)
    {
        throw new FSWriteError(e, file.getPath());
    }
}
public void mark()
{
    mark = indexFile.mark();
}
public long getOnDiskFilePointer()
{
    return dataFile.getOnDiskFilePointer();
}
public void append(DecoratedKey key, RowIndexEntry indexEntry, long dataEnd, ByteBuffer indexInfo) throws IOException
{
    bf.add(key);
    long indexStart = indexFile.position();
    try
    {
        ByteBufferUtil.writeWithShortLength(key.getKey(), indexFile);
        rowIndexEntrySerializer.serialize(indexEntry, indexFile, indexInfo);
    }
    catch (IOException e)
    {
        throw new FSWriteError(e, indexFile.getPath());
    }
    long indexEnd = indexFile.position();

    if (logger.isTraceEnabled())
        logger.trace("wrote index entry: {} at {}", indexEntry, indexStart);

    summary.maybeAddEntry(key, indexStart, indexEnd, dataEnd);
}
@Override
protected void flushData()
{
    super.flushData();

    // feed the just-flushed bytes to the checksum writer
    ByteBuffer toAppend = buffer.duplicate();
    toAppend.position(0);
    toAppend.limit(buffer.position());
    crcMetadata.appendDirect(toAppend, false);
}
private static void writeMetadata(Descriptor desc, Map<MetadataType, MetadataComponent> components)
{
    SequentialWriter out = SequentialWriter.open(new File(desc.filenameFor(Component.STATS)));
    try
    {
        desc.getMetadataSerializer().serialize(components, out.stream);
    }
    catch (IOException e)
    {
        throw new FSWriteError(e, out.getPath());
    }
    finally
    {
        out.close();
    }
}
protected void syncInternal()
{
    if (syncNeeded)
    {
        flushInternal();
        syncDataOnlyInternal();

        if (!directorySynced)
        {
            CLibrary.trySync(directoryFD);
            directorySynced = true;
        }

        syncNeeded = false;
    }
}
dataFile.setPostFlushListener(new Runnable()
{
    public void run()
    {
        summary.markDataSynced(dataFile.getLastFlushOffset());
    }
});