/** * Flush ready buffer to persistent store. currentBuffer is not flushed as it * accumulates new log records while readyBuffer will be flushed and synced. */ @Override public void flushAndSync(boolean durable) throws IOException { if (fp == null) { throw new IOException("Trying to use aborted output stream"); } if (doubleBuf.isFlushed()) { LOG.info("Nothing to flush"); return; } preallocate(); // preallocate file if necessary doubleBuf.flushTo(fp); if (durable && !shouldSkipFsyncForTests && !shouldSyncWritesAndSkipFsync) { fc.force(false); // metadata updates not needed } }
@Override // EditLogOutputStream protected void flushAndSync(boolean durable) throws IOException { assert out.getLength() == 0 : "Output buffer is not empty"; if (doubleBuf.isFlushed()) { LOG.info("Nothing to flush"); return; } int numReadyTxns = doubleBuf.countReadyTxns(); long firstTxToFlush = doubleBuf.getFirstReadyTxId(); doubleBuf.flushTo(out); if (out.getLength() > 0) { assert numReadyTxns > 0; byte[] data = Arrays.copyOf(out.getData(), out.getLength()); out.reset(); assert out.getLength() == 0 : "Output buffer is not empty"; backupNode.journal(journalInfo, 0, firstTxToFlush, numReadyTxns, data); } }
// Drain the ready half of the double buffer into the outgoing send buffer.
buf.flushTo(bufToSend);
// Everything counted as ready must have been copied over.
assert bufToSend.getLength() == numReadyBytes;
// NOTE(review): getData() presumably returns the backing array, which may be
// larger than getLength() — confirm callers only use the first getLength() bytes.
byte[] data = bufToSend.getData();
/** * Flush ready buffer to persistent store. currentBuffer is not flushed as it * accumulates new log records while readyBuffer will be flushed and synced. */ @Override protected void flushAndSync() throws IOException { if (fp == null) { throw new IOException("Trying to use aborted output stream"); } preallocate(); // preallocate file if necessary if (doubleBuf.isFlushed()) { return; } doubleBuf.flushTo(fp); fc.force(false); // metadata updates not needed fc.position(fc.position() - 1); // skip back the end-of-file marker }
/** * Flush ready buffer to persistent store. currentBuffer is not flushed as it * accumulates new log records while readyBuffer will be flushed and synced. */ @Override public void flushAndSync(boolean durable) throws IOException { if (fp == null) { throw new IOException("Trying to use aborted output stream"); } if (doubleBuf.isFlushed()) { LOG.info("Nothing to flush"); return; } preallocate(); // preallocate file if necessary doubleBuf.flushTo(fp); if (durable && !shouldSkipFsyncForTests && !shouldSyncWritesAndSkipFsync) { fc.force(false); // metadata updates not needed } }
/** * Flush ready buffer to persistent store. currentBuffer is not flushed as it * accumulates new log records while readyBuffer will be flushed and synced. */ @Override public void flushAndSync(boolean durable) throws IOException { if (fp == null) { throw new IOException("Trying to use aborted output stream"); } if (doubleBuf.isFlushed()) { LOG.info("Nothing to flush"); return; } preallocate(); // preallocate file if necessary doubleBuf.flushTo(fp); if (durable && !shouldSkipFsyncForTests && !shouldSyncWritesAndSkipFsync) { fc.force(false); // metadata updates not needed } }
@Override // EditLogOutputStream protected void flushAndSync(boolean durable) throws IOException { assert out.getLength() == 0 : "Output buffer is not empty"; if (doubleBuf.isFlushed()) { LOG.info("Nothing to flush"); return; } int numReadyTxns = doubleBuf.countReadyTxns(); long firstTxToFlush = doubleBuf.getFirstReadyTxId(); doubleBuf.flushTo(out); if (out.getLength() > 0) { assert numReadyTxns > 0; byte[] data = Arrays.copyOf(out.getData(), out.getLength()); out.reset(); assert out.getLength() == 0 : "Output buffer is not empty"; backupNode.journal(journalInfo, 0, firstTxToFlush, numReadyTxns, data); } }
@Override // EditLogOutputStream protected void flushAndSync(boolean durable) throws IOException { assert out.getLength() == 0 : "Output buffer is not empty"; if (doubleBuf.isFlushed()) { LOG.info("Nothing to flush"); return; } int numReadyTxns = doubleBuf.countReadyTxns(); long firstTxToFlush = doubleBuf.getFirstReadyTxId(); doubleBuf.flushTo(out); if (out.getLength() > 0) { assert numReadyTxns > 0; byte[] data = Arrays.copyOf(out.getData(), out.getLength()); out.reset(); assert out.getLength() == 0 : "Output buffer is not empty"; backupNode.journal(journalInfo, 0, firstTxToFlush, numReadyTxns, data); } }
@Override protected void flushAndSync(boolean durable) throws IOException { int numReadyBytes = buf.countReadyBytes(); if (numReadyBytes > 0) { int numReadyTxns = buf.countReadyTxns(); long firstTxToFlush = buf.getFirstReadyTxId(); assert numReadyTxns > 0; // Copy from our double-buffer into a new byte array. This is for // two reasons: // 1) The IPC code has no way of specifying to send only a slice of // a larger array. // 2) because the calls to the underlying nodes are asynchronous, we // need a defensive copy to avoid accidentally mutating the buffer // before it is sent. DataOutputBuffer bufToSend = new DataOutputBuffer(numReadyBytes); buf.flushTo(bufToSend); assert bufToSend.getLength() == numReadyBytes; byte[] data = bufToSend.getData(); assert data.length == bufToSend.getLength(); QuorumCall<AsyncLogger, Void> qcall = loggers.sendEdits( segmentTxId, firstTxToFlush, numReadyTxns, data); loggers.waitForWriteQuorum(qcall, writeTimeoutMs, "sendEdits"); // Since we successfully wrote this batch, let the loggers know. Any future // RPCs will thus let the loggers know of the most recent transaction, even // if a logger has fallen behind. loggers.setCommittedTxId(firstTxToFlush + numReadyTxns - 1); } }
@Override protected void flushAndSync(boolean durable) throws IOException { int numReadyBytes = buf.countReadyBytes(); if (numReadyBytes > 0) { int numReadyTxns = buf.countReadyTxns(); long firstTxToFlush = buf.getFirstReadyTxId(); assert numReadyTxns > 0; // Copy from our double-buffer into a new byte array. This is for // two reasons: // 1) The IPC code has no way of specifying to send only a slice of // a larger array. // 2) because the calls to the underlying nodes are asynchronous, we // need a defensive copy to avoid accidentally mutating the buffer // before it is sent. DataOutputBuffer bufToSend = new DataOutputBuffer(numReadyBytes); buf.flushTo(bufToSend); assert bufToSend.getLength() == numReadyBytes; byte[] data = bufToSend.getData(); assert data.length == bufToSend.getLength(); QuorumCall<AsyncLogger, Void> qcall = loggers.sendEdits( segmentTxId, firstTxToFlush, numReadyTxns, data); loggers.waitForWriteQuorum(qcall, writeTimeoutMs, "sendEdits"); // Since we successfully wrote this batch, let the loggers know. Any future // RPCs will thus let the loggers know of the most recent transaction, even // if a logger has fallen behind. loggers.setCommittedTxId(firstTxToFlush + numReadyTxns - 1); } }