/**
 * Returns the highest zxid recorded in the transaction logs.
 *
 * @return the last logged zxid
 */
public long getLastLoggedZxid() {
    // Delegate straight to a fresh FileTxnLog over the txnlog directory.
    return new FileTxnLog(dataDir).getLastLoggedZxid();
}
/**
 * Obtain a TxnIterator for reading the txnlog starting at a given zxid.
 *
 * @param zxid        starting zxid
 * @param fastForward if true the iterator is advanced to the txn with the
 *                    given zxid; otherwise it points at the first txn of a
 *                    txnlog file that may contain the given zxid
 * @return a TxnIterator over the transaction log
 * @throws IOException if the transaction log cannot be opened or read
 */
public TxnIterator readTxnLog(long zxid, boolean fastForward) throws IOException {
    // Delegate to a fresh FileTxnLog over the txnlog directory.
    return new FileTxnLog(dataDir).read(zxid, fastForward);
}
/**
 * Returns the highest zxid recorded in the transaction logs.
 *
 * @return the last logged zxid
 */
public long getLastLoggedZxid() {
    // Delegate straight to a fresh FileTxnLog over the txnlog directory.
    return new FileTxnLog(dataDir).getLastLoggedZxid();
}
/** * truncate the transaction logs the zxid * specified * @param zxid the zxid to truncate the logs to * @return true if able to truncate the log, false if not * @throws IOException */ public boolean truncateLog(long zxid) throws IOException { // close the existing txnLog and snapLog close(); // truncate it FileTxnLog truncLog = new FileTxnLog(dataDir); boolean truncated = truncLog.truncate(zxid); truncLog.close(); // re-open the txnLog and snapLog // I'd rather just close/reopen this object itself, however that // would have a big impact outside ZKDatabase as there are other // objects holding a reference to this object. txnLog = new FileTxnLog(dataDir); snapLog = new FileSnap(snapDir); return truncated; }
PlayBackListener listener) throws IOException { long deserializeResult = snapLog.deserialize(dt, sessions); FileTxnLog txnLog = new FileTxnLog(dataDir); boolean trustEmptyDB; File initFile = new File(dataDir.getParent(), "initialize");
txnLog = new FileTxnLog(this.dataDir); snapLog = new FileSnap(this.snapDir);
/** * truncate the transaction logs the zxid * specified * @param zxid the zxid to truncate the logs to * @return true if able to truncate the log, false if not * @throws IOException */ public boolean truncateLog(long zxid) throws IOException { // close the existing txnLog and snapLog close(); // truncate it FileTxnLog truncLog = new FileTxnLog(dataDir); boolean truncated = truncLog.truncate(zxid); truncLog.close(); // re-open the txnLog and snapLog // I'd rather just close/reopen this object itself, however that // would have a big impact outside ZKDatabase as there are other // objects holding a reference to this object. txnLog = new FileTxnLog(dataDir); snapLog = new FileSnap(snapDir); return truncated; }
@Test public void testForceSyncDefaultDisabled() { try { File file = new File("foo.10027c6de"); System.setProperty("zookeeper.forceSync","no"); FileTxnLog log = new FileTxnLog(file); Assert.assertFalse(log.isForceSync()); } finally { //Reset back to default. System.setProperty("zookeeper.forceSync","yes"); } }
/** * get the last zxid that was logged in the transaction logs * @return the last zxid logged in the transaction logs */ public long getLastLoggedZxid() { File[] files = getLogFiles(logDir.listFiles(), 0); long maxLog=files.length>0? Util.getZxidFromName(files[files.length-1].getName(),LOG_FILE_PREFIX):-1; // if a log file is more recent we must scan it to find // the highest zxid long zxid = maxLog; TxnIterator itr = null; try { FileTxnLog txn = new FileTxnLog(logDir); itr = txn.read(maxLog); while (true) { if(!itr.next()) break; TxnHeader hdr = itr.getHeader(); zxid = hdr.getZxid(); } } catch (IOException e) { LOG.warn("Unexpected exception", e); } finally { close(itr); } return zxid; }
/**
 * Verifies that forceSync is enabled by default when the
 * "zookeeper.forceSync" system property has not been overridden.
 */
@Test
public void testForceSyncDefaultEnabled() {
    File logDir = new File("foo.10027c6de");
    FileTxnLog txnLog = new FileTxnLog(logDir);
    Assert.assertTrue(txnLog.isForceSync());
}
/** * get the last zxid that was logged in the transaction logs * @return the last zxid logged in the transaction logs */ public long getLastLoggedZxid() { File[] files = getLogFiles(logDir.listFiles(), 0); long maxLog=files.length>0? Util.getZxidFromName(files[files.length-1].getName(),LOG_FILE_PREFIX):-1; // if a log file is more recent we must scan it to find // the highest zxid long zxid = maxLog; TxnIterator itr = null; try { FileTxnLog txn = new FileTxnLog(logDir); itr = txn.read(maxLog); while (true) { if(!itr.next()) break; TxnHeader hdr = itr.getHeader(); zxid = hdr.getZxid(); } } catch (IOException e) { LOG.warn("Unexpected exception", e); } finally { close(itr); } return zxid; }
/**
 * Reads the first and last zxid recorded in the given log file.
 *
 * The file is copied into a temporary directory under the name "log.0" so
 * FileTxnLog can open it regardless of its original name.
 *
 * @param logFile the transaction log file to inspect
 * @return a Pair of (first zxid, last zxid) found in the file
 * @throws IOException if the file cannot be copied or read
 */
Pair<Long, Long> getFirstLastZxid(File logFile) throws IOException {
    File tmp = createTmpDir();
    Files.copy(logFile.toPath(), new File(tmp, "log.0").toPath());
    FileTxnLog txnLog = new FileTxnLog(tmp);
    TxnLog.TxnIterator it = txnLog.read(0);
    try {
        long firstZxid = it.getHeader().getZxid();
        long lastZxid = firstZxid;
        while (it.next()) {
            lastZxid = it.getHeader().getZxid();
        }
        return new Pair<>(firstZxid, lastZxid);
    } finally {
        // close the iterator (the original leaked it) and the log, and
        // remove the temp dir even when reading throws
        it.close();
        txnLog.close();
        rmr(tmp);
    }
}
/**
 * Simulates ZOOKEEPER-1069 and verifies that flush() before padLogFile
 * fixes it.
 */
@Test
public void testPad() throws Exception {
    File tmpDir = ClientBase.createTmpDir();
    FileTxnLog txnLog = new FileTxnLog(tmpDir);
    TxnHeader txnHeader = new TxnHeader(0xabcd, 0x123, 0x123,
            Time.currentElapsedTime(), ZooDefs.OpCode.create);
    Record txn = new CreateTxn("/Test", new byte[0], null, false, 1);
    txnLog.append(txnHeader, txn);
    // try-with-resources closes the input stream (the original leaked it);
    // the finally releases the log's own file handles
    try (FileInputStream in = new FileInputStream(
            tmpDir.getPath() + "/log." + Long.toHexString(txnHeader.getZxid()))) {
        BinaryInputArchive ia = BinaryInputArchive.getArchive(in);
        FileHeader header = new FileHeader();
        header.deserialize(ia, "fileheader");
        LOG.info("Received magic : " + header.getMagic()
                + " Expected : " + FileTxnLog.TXNLOG_MAGIC);
        Assert.assertTrue("Missing magic number ",
                header.getMagic() == FileTxnLog.TXNLOG_MAGIC);
    } finally {
        txnLog.close();
    }
}
public void testSyncThresholdExceedCount() throws IOException { // Given ... // Set threshold to -1, as after the first commit it takes 0ms to commit to disk. java.lang.System.setProperty(FileTxnLog.ZOOKEEPER_FSYNC_WARNING_THRESHOLD_MS_PROPERTY, "-1"); ServerStats.Provider providerMock = mock(ServerStats.Provider.class); ServerStats serverStats = new ServerStats(providerMock); File logDir = ClientBase.createTmpDir(); FileTxnLog fileTxnLog = new FileTxnLog(logDir); fileTxnLog.setServerStats(serverStats); // Verify serverStats is 0 before any commit Assert.assertEquals(0L, serverStats.getFsyncThresholdExceedCount()); // When ... for (int i = 0; i < 50; i++) { fileTxnLog.append(new TxnHeader(1, 1, 1, 1, ZooDefs.OpCode.create), new CreateTxn("/testFsyncThresholdCountIncreased", new byte[]{}, ZooDefs.Ids.OPEN_ACL_UNSAFE, false, 0)); fileTxnLog.commit(); // only 1 commit, otherwise it will be flaky // Then ... verify serverStats is updated to the number of commits (as threshold is set to 0) Assert.assertEquals((long) i + 1 , serverStats.getFsyncThresholdExceedCount()); } }
DeleteTxn txn = new DeleteTxn("/foo"); File tmpDir = createTmpDir(); FileTxnLog txnLog = new FileTxnLog(tmpDir);
FileTxnLog txnLog = new FileTxnLog(logDir); TxnIterator itr = txnLog.read(0);
/**
 * Test that the reported log size is updated correctly as transactions are
 * appended and committed.
 */
@Test
public void testGetCurrentLogSize() throws Exception {
    // disable the size-based log rollover so only appends grow the file
    FileTxnLog.setTxnLogSizeLimit(-1);
    File tmpDir = ClientBase.createTmpDir();
    FileTxnLog log = new FileTxnLog(tmpDir);
    FileTxnLog.setPreallocSize(PREALLOCATE);
    CreateRequest record = new CreateRequest(null, new byte[NODE_SIZE],
            ZooDefs.Ids.OPEN_ACL_UNSAFE, 0);
    int zxid = 1;
    try {
        for (int i = 0; i < 4; i++) {
            log.append(new TxnHeader(0, 0, zxid++, 0, 0), record);
            LOG.debug("Current log size: " + log.getCurrentLogSize());
        }
        log.commit();
        LOG.info("Current log size: " + log.getCurrentLogSize());
        Assert.assertTrue(log.getCurrentLogSize() > (zxid - 1) * NODE_SIZE);

        for (int i = 0; i < 4; i++) {
            log.append(new TxnHeader(0, 0, zxid++, 0, 0), record);
            LOG.debug("Current log size: " + log.getCurrentLogSize());
        }
        log.commit();
        LOG.info("Current log size: " + log.getCurrentLogSize());
        Assert.assertTrue(log.getCurrentLogSize() > (zxid - 1) * NODE_SIZE);
    } finally {
        // release the open log file even when an assertion fails
        // (the original never closed it)
        log.close();
    }
}
@Test public void testPreAllocSizeSmallerThanTxnData() throws IOException { File logDir = ClientBase.createTmpDir(); FileTxnLog fileTxnLog = new FileTxnLog(logDir); // Set a small preAllocSize (.5 MB) final int preAllocSize = 500 * KB; FilePadding.setPreallocSize(preAllocSize); // Create dummy txn larger than preAllocSize // Since the file padding inserts a 0, we will fill the data with 0xff to ensure we corrupt the data if we put the 0 in the data byte[] data = new byte[2 * preAllocSize]; Arrays.fill(data, (byte) 0xff); // Append and commit 2 transactions to the log // Prior to ZOOKEEPER-2249, attempting to pad in association with the second transaction will corrupt the first fileTxnLog.append(new TxnHeader(1, 1, 1, 1, ZooDefs.OpCode.create), new CreateTxn("/testPreAllocSizeSmallerThanTxnData1", data, ZooDefs.Ids.OPEN_ACL_UNSAFE, false, 0)); fileTxnLog.commit(); fileTxnLog.append(new TxnHeader(1, 1, 2, 2, ZooDefs.OpCode.create), new CreateTxn("/testPreAllocSizeSmallerThanTxnData2", new byte[]{}, ZooDefs.Ids.OPEN_ACL_UNSAFE, false, 0)); fileTxnLog.commit(); fileTxnLog.close(); // Read the log back from disk, this will throw a java.io.IOException: CRC check failed prior to ZOOKEEPER-2249 FileTxnLog.FileTxnIterator fileTxnIterator = new FileTxnLog.FileTxnIterator(logDir, 0); // Verify the data in the first transaction CreateTxn createTxn = (CreateTxn) fileTxnIterator.getTxn(); Assert.assertTrue(Arrays.equals(createTxn.getData(), data)); // Verify the data in the second transaction fileTxnIterator.next(); createTxn = (CreateTxn) fileTxnIterator.getTxn(); Assert.assertTrue(Arrays.equals(createTxn.getData(), new byte[]{})); }
FileTxnLog txnLog = new FileTxnLog(logDir); TxnIterator itr = txnLog.read(1, false);
@Test public void testTruncationStreamReset() throws Exception { File tmpdir = ClientBase.createTmpDir(); FileTxnSnapLog snaplog = new FileTxnSnapLog(tmpdir, tmpdir); ZKDatabase zkdb = new ZKDatabase(snaplog); // make sure to snapshot, so that we have something there when // truncateLog reloads the db snaplog.save(zkdb.getDataTree(), zkdb.getSessionWithTimeOuts(), false); for (int i = 1; i <= 100; i++) { append(zkdb, i); } zkdb.truncateLog(1); append(zkdb, 200); zkdb.close(); // verify that the truncation and subsequent append were processed // correctly FileTxnLog txnlog = new FileTxnLog(new File(tmpdir, "version-2")); TxnIterator iter = txnlog.read(1); TxnHeader hdr = iter.getHeader(); Record txn = iter.getTxn(); Assert.assertEquals(1, hdr.getZxid()); Assert.assertTrue(txn instanceof SetDataTxn); iter.next(); hdr = iter.getHeader(); txn = iter.getTxn(); Assert.assertEquals(200, hdr.getZxid()); Assert.assertTrue(txn instanceof SetDataTxn); iter.close(); ClientBase.recursiveDelete(tmpdir); }