/**
 * There is no persistent storage. Just clear the buffers.
 */
@Override // EditLogOutputStream
public void create(int layoutVersion) throws IOException {
  assert doubleBuf.isFlushed() : "previous data is not flushed yet";
  this.doubleBuf = new EditsDoubleBuffer(DEFAULT_BUFFER_SIZE);
}
@Override // EditLogOutputStream
public void close() throws IOException {
  // close should have been called after all pending transactions
  // have been flushed & synced.
  int size = doubleBuf.countBufferedBytes();
  if (size != 0) {
    throw new IOException("BackupEditStream has " + size +
        " records still to be flushed and cannot be closed.");
  }
  RPC.stopProxy(backupNode); // stop the RPC threads
  doubleBuf.close();
  doubleBuf = null;
}
@Override // EditLogOutputStream
protected void flushAndSync(boolean durable) throws IOException {
  assert out.getLength() == 0 : "Output buffer is not empty";
  if (doubleBuf.isFlushed()) {
    LOG.info("Nothing to flush");
    return;
  }
  int numReadyTxns = doubleBuf.countReadyTxns();
  long firstTxToFlush = doubleBuf.getFirstReadyTxId();
  doubleBuf.flushTo(out);
  if (out.getLength() > 0) {
    assert numReadyTxns > 0;
    byte[] data = Arrays.copyOf(out.getData(), out.getLength());
    out.reset();
    assert out.getLength() == 0 : "Output buffer is not empty";
    backupNode.journal(journalInfo, 0, firstTxToFlush, numReadyTxns, data);
  }
}
/**
 * Flush ready buffer to persistent store. currentBuffer is not flushed as it
 * accumulates new log records while readyBuffer will be flushed and synced.
 */
@Override
public void flushAndSync(boolean durable) throws IOException {
  if (fp == null) {
    throw new IOException("Trying to use aborted output stream");
  }
  if (doubleBuf.isFlushed()) {
    LOG.info("Nothing to flush");
    return;
  }
  preallocate(); // preallocate file if necessary
  doubleBuf.flushTo(fp);
  if (durable && !shouldSkipFsyncForTests && !shouldSyncWritesAndSkipFsync) {
    fc.force(false); // metadata updates not needed
  }
}
@Test
public void testDoubleBuffer() throws IOException {
  EditsDoubleBuffer buf = new EditsDoubleBuffer(1024);

  assertTrue(buf.isFlushed());
  byte[] data = new byte[100];
  buf.writeRaw(data, 0, data.length);
  assertEquals("Should count new data correctly",
      data.length, buf.countBufferedBytes());

  assertTrue("Writing to current buffer should not affect flush state",
      buf.isFlushed());

  buf.setReadyToFlush();
  assertEquals("Swapping buffers should still count buffered bytes",
      data.length, buf.countBufferedBytes());
  assertFalse(buf.isFlushed());

  DataOutputBuffer outBuf = new DataOutputBuffer();
  buf.flushTo(outBuf);
  assertEquals(data.length, outBuf.getLength());
  assertTrue(buf.isFlushed());
  assertEquals(0, buf.countBufferedBytes());

  buf.writeRaw(data, 0, data.length);
  assertEquals("Should count new data correctly",
      data.length, buf.countBufferedBytes());
  buf.setReadyToFlush();
  buf.flushTo(outBuf);
  assertEquals(0, buf.countBufferedBytes());
}
@Test
public void shouldFailToCloseWhenUnflushed() throws IOException {
  EditsDoubleBuffer buf = new EditsDoubleBuffer(1024);
  buf.writeRaw(new byte[1], 0, 1);
  try {
    buf.close();
    fail("Did not fail to close with unflushed data");
  } catch (IOException ioe) {
    if (!ioe.toString().contains("still to be flushed")) {
      throw ioe;
    }
  }
}
public QuorumOutputStream(AsyncLoggerSet loggers, long txId,
    int outputBufferCapacity, int writeTimeoutMs,
    boolean updateCommittedTxId) throws IOException {
  super();
  this.buf = new EditsDoubleBuffer(outputBufferCapacity);
  this.loggers = loggers;
  this.segmentTxId = txId;
  this.writeTimeoutMs = writeTimeoutMs;
  this.updateCommittedTxId = updateCommittedTxId;
}
/**
 * All data that has been written to the stream so far will be flushed. New
 * data can still be written to the stream while the flush is performed.
 */
@Override
public void setReadyToFlush() throws IOException {
  doubleBuf.setReadyToFlush();
}
@Override
public void close() throws IOException {
  if (buf != null) {
    buf.close();
    buf = null;
  }
}
@Override
public void write(FSEditLogOp op) throws IOException {
  buf.writeOp(op);
}
/**
 * All data that has been written to the stream so far will be flushed. New
 * data can still be written to the stream while the flush is performed.
 */
@Override
public void setReadyToFlush() throws IOException {
  // Insert an end-of-file marker so a reader stops at the last flushed
  // transaction; the next flush seeks back one byte and overwrites it
  // (see the fc.position(fc.position() - 1) call in flushAndSync below).
  doubleBuf.getCurrentBuf().write(FSEditLogOpCodes.OP_INVALID.getOpCode());
  doubleBuf.setReadyToFlush();
}
public void setReadyToFlush() {
  assert isFlushed() : "previous data not flushed yet";
  TxnBuffer tmp = bufReady;
  bufReady = bufCurrent;
  bufCurrent = tmp;
}
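// A minimal usage sketch of the swap-based lifecycle above, in the style of
// the tests in this section (assumes the same Hadoop test classpath; this is
// an illustration, not code from the original source). Writers always append
// to bufCurrent; setReadyToFlush() swaps the two buffers, so flushTo() drains
// the previous batch while new edits keep accumulating in the fresh buffer.
@Test
public void sketchDoubleBufferLifecycle() throws IOException {
  EditsDoubleBuffer buf = new EditsDoubleBuffer(1024);
  buf.writeRaw(new byte[] { 1, 2, 3 }, 0, 3); // lands in bufCurrent
  buf.setReadyToFlush();                      // swap: bufCurrent <-> bufReady
  buf.writeRaw(new byte[] { 4 }, 0, 1);       // new data, fresh bufCurrent
  DataOutputBuffer out = new DataOutputBuffer();
  buf.flushTo(out);                           // drains only the ready batch
  assertEquals(3, out.getLength());           // the swapped-out bytes
  assertEquals(1, buf.countBufferedBytes());  // new byte is still buffered
}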
/**
 * Write a transaction to the stream. The serialization format is:
 * <ul>
 *   <li>the opcode (byte)</li>
 *   <li>the transaction id (long)</li>
 *   <li>the actual Writables for the transaction</li>
 * </ul>
 */
@Override
public void writeRaw(byte[] bytes, int offset, int length) throws IOException {
  doubleBuf.writeRaw(bytes, offset, length);
}
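// A simplified sketch of the framing described in the javadoc above: opcode
// byte, then transaction id, then the op-specific payload. The real bytes are
// produced by the edit-log op writer (and newer layout versions add further
// framing such as checksums); this only illustrates the field order, using
// plain java.io.ByteArrayOutputStream and java.io.DataOutputStream.
static byte[] encodeTxnSketch(byte opCode, long txId, byte[] opPayload)
    throws IOException {
  ByteArrayOutputStream bytes = new ByteArrayOutputStream();
  DataOutputStream out = new DataOutputStream(bytes);
  out.writeByte(opCode);  // the opcode (byte)
  out.writeLong(txId);    // the transaction id (long)
  out.write(opPayload);   // the serialized Writables for the transaction
  out.flush();
  return bytes.toByteArray();
}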
/**
 * Create an empty edits log file.
 */
@Override
public void create(int layoutVersion) throws IOException {
  fc.truncate(0);
  fc.position(0);
  writeHeader(layoutVersion, doubleBuf.getCurrentBuf());
  setReadyToFlush();
  flush();
}
/**
 * @return true if the amount of buffered data exceeds the initial buffer size
 */
@Override
public boolean shouldForceSync() {
  return doubleBuf.shouldForceSync();
}
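// A hedged sketch of how a caller can use shouldForceSync(): after logging
// an edit, force a sync once the double buffer has grown past its initial
// capacity. Loosely modeled on the automatic-sync check in FSEditLog's
// logEdit path; the method name and structure here are illustrative, not
// the exact upstream code.
void logEditSketch(EditLogOutputStream stream, FSEditLogOp op)
    throws IOException {
  stream.write(op);                // buffered in bufCurrent
  if (stream.shouldForceSync()) {  // buffer exceeded its initial size
    stream.setReadyToFlush();      // swap buffers
    stream.flush();                // flush and sync the ready batch
  }
}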
/**
 * Return the size of the current edit log, including buffered data.
 */
@Override
long length() throws IOException {
  // file size + size of both buffers
  return fc.size() + doubleBuf.countBufferedBytes();
}
@Override
protected void flushAndSync(boolean durable) throws IOException {
  int numReadyBytes = buf.countReadyBytes();
  if (numReadyBytes > 0) {
    int numReadyTxns = buf.countReadyTxns();
    long firstTxToFlush = buf.getFirstReadyTxId();

    assert numReadyTxns > 0;

    // Copy from our double-buffer into a new byte array. This is for
    // two reasons:
    // 1) The IPC code has no way of specifying to send only a slice of
    //    a larger array.
    // 2) because the calls to the underlying nodes are asynchronous, we
    //    need a defensive copy to avoid accidentally mutating the buffer
    //    before it is sent.
    DataOutputBuffer bufToSend = new DataOutputBuffer(numReadyBytes);
    buf.flushTo(bufToSend);
    assert bufToSend.getLength() == numReadyBytes;
    byte[] data = bufToSend.getData();
    assert data.length == bufToSend.getLength();

    QuorumCall<AsyncLogger, Void> qcall = loggers.sendEdits(
        segmentTxId, firstTxToFlush, numReadyTxns, data);
    loggers.waitForWriteQuorum(qcall, writeTimeoutMs, "sendEdits");

    // Since we successfully wrote this batch, let the loggers know. Any future
    // RPCs will thus let the loggers know of the most recent transaction, even
    // if a logger has fallen behind.
    loggers.setCommittedTxId(firstTxToFlush + numReadyTxns - 1);
  }
}
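// Why the fresh DataOutputBuffer above doubles as a defensive copy: getData()
// returns the live backing array, which reset()/write cycles reuse in place.
// A hedged illustration (not from the original source) of what could go wrong
// if a shared buffer were handed to an asynchronous sender instead:
static void defensiveCopySketch() throws IOException {
  DataOutputBuffer shared = new DataOutputBuffer();
  shared.write(new byte[] { 1, 2, 3 }, 0, 3);
  byte[] view = shared.getData(); // a reference, not a snapshot
  shared.reset();                 // keeps the same backing array
  shared.write(new byte[] { 9 }, 0, 1);
  assert view[0] == 9;            // bytes "in flight" were silently mutated
}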
/**
 * Flush ready buffer to persistent store. currentBuffer is not flushed as it
 * accumulates new log records while readyBuffer will be flushed and synced.
 */
@Override
protected void flushAndSync() throws IOException {
  if (fp == null) {
    throw new IOException("Trying to use aborted output stream");
  }
  preallocate(); // preallocate file if necessary
  if (doubleBuf.isFlushed()) {
    return;
  }
  doubleBuf.flushTo(fp);
  fc.force(false); // metadata updates not needed
  fc.position(fc.position() - 1); // skip back the end-of-file marker
}
/**
 * Creates output buffers and file object.
 *
 * @param conf Configuration object
 * @param name File name to store edit log
 * @param size Size of flush buffer
 * @throws IOException
 */
public EditLogFileOutputStream(Configuration conf, File name, int size)
    throws IOException {
  super();
  shouldSyncWritesAndSkipFsync = conf.getBoolean(
      DFSConfigKeys.DFS_NAMENODE_EDITS_NOEDITLOGCHANNELFLUSH,
      DFSConfigKeys.DFS_NAMENODE_EDITS_NOEDITLOGCHANNELFLUSH_DEFAULT);

  file = name;
  doubleBuf = new EditsDoubleBuffer(size);
  RandomAccessFile rp;
  if (shouldSyncWritesAndSkipFsync) {
    rp = new RandomAccessFile(name, "rws");
  } else {
    rp = new RandomAccessFile(name, "rw");
  }
  fp = new FileOutputStream(rp.getFD()); // open for append
  fc = rp.getChannel();
  fc.position(fc.size());
}
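// A hedged construction sketch: toggling the "rws" synchronous-write mode
// read by the constructor above. The file path is illustrative, and this
// assumes hadoop-hdfs (Configuration, DFSConfigKeys) is on the classpath.
static EditLogFileOutputStream openEditsSketch() throws IOException {
  Configuration conf = new Configuration();
  // true -> RandomAccessFile "rws": each write reaches the device
  // synchronously, letting flushAndSync() skip the explicit fc.force(false).
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_EDITS_NOEDITLOGCHANNELFLUSH, true);
  File edits = new File("/tmp/edits_inprogress"); // hypothetical path
  return new EditLogFileOutputStream(conf, edits, 512 * 1024);
}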