@Test public void gcCheckDeletedSize() throws Exception { log.info("Starting gcCheckDeletedSize()"); // Capture logs for the second round of gc LogCustomizer customLogs = LogCustomizer .forLogger(MarkSweepGarbageCollector.class.getName()) .enable(Level.INFO) .filter(Level.INFO) .contains("Estimated size recovered for") .create(); customLogs.starting(); Set<String> existingAfterGC = executeGarbageCollection(cluster, cluster.getCollector(0),false); assertEquals(1, customLogs.getLogs().size()); long deletedSize = (cluster.blobStoreState.blobsAdded.size() - cluster.blobStoreState.blobsPresent.size()) * 100; assertTrue(customLogs.getLogs().get(0).contains(String.valueOf(deletedSize))); assertStats(cluster.statsProvider, 1, 0, cluster.blobStoreState.blobsAdded.size() - cluster.blobStoreState.blobsPresent.size(), cluster.blobStoreState.blobsAdded.size() - cluster.blobStoreState.blobsPresent.size(), NAME); assertEquals(deletedSize, getStatCount(cluster.statsProvider, NAME, TOTAL_SIZE_DELETED)); customLogs.finished(); assertTrue(Sets.symmetricDifference(cluster.blobStoreState.blobsPresent, existingAfterGC).isEmpty()); }
@Test
public void queryIteratorNotConsumedTest() throws SQLException, NoSuchMethodException, SecurityException,
        IllegalAccessException, IllegalArgumentException, InvocationTargetException {
    // Verifies that finalizing a query iterator that was never consumed emits a
    // DEBUG log entry ("finalizing unclosed") from RDBDocumentStoreJDBC.
    LogCustomizer customLogs = LogCustomizer.forLogger(RDBDocumentStoreJDBC.class.getName())
            .enable(Level.DEBUG).contains("finalizing unclosed").create();
    customLogs.starting();
    insertTestResource(this.getClass().getName() + "." + name.getMethodName());
    MyConnectionHandler ch = new MyConnectionHandler(super.rdbDataSource);
    RDBTableMetaData tmd = ((RDBDocumentStore) super.ds).getTable(Collection.NODES);
    List<QueryCondition> conditions = Collections.emptyList();
    Iterator<RDBRow> qi = jdbc.queryAsIterator(ch, tmd, null, null, RDBDocumentStore.EMPTY_KEY_PATTERN,
            conditions, Integer.MAX_VALUE, null);
    assertTrue(qi instanceof Closeable);
    // Opening the iterator should have borrowed exactly one connection.
    assertEquals(1, ch.cnt.get());
    // Invoke finalize() reflectively to simulate GC finalization of the
    // non-consumed iterator without waiting for a real GC cycle.
    Method fin = qi.getClass().getDeclaredMethod("finalize");
    try {
        fin.setAccessible(true);
        fin.invoke(qi);
        assertTrue("finalizing non-consumed iterator should generate log entry",
                customLogs.getLogs().size() >= 1);
    } finally {
        // Always release the iterator's resources and restore accessibility,
        // then detach the custom log appender.
        Utils.closeIfCloseable(qi);
        fin.setAccessible(false);
        customLogs.finished();
    }
}
/**
 * Executes a {@code DataStoreCommand} with the given (intentionally incorrect)
 * arguments and asserts that the expected message was logged.
 *
 * @param argList   command-line arguments to pass to the command
 * @param assertMsg expected log fragments; only element 0 is checked
 * @param logger    class whose logger is captured
 */
public static void testIncorrectParams(List<String> argList, ArrayList<String> assertMsg, Class<?> logger) {
    LogCustomizer customLogs = LogCustomizer
        .forLogger(logger.getName())
        .enable(Level.INFO)
        .filter(Level.INFO)
        .contains(assertMsg.get(0))
        .create();
    customLogs.starting();
    try {
        DataStoreCommand cmd = new DataStoreCommand();
        try {
            cmd.execute(argList.toArray(new String[0]));
        } catch (Exception e) {
            // Command failure is expected for bad parameters; the assertion
            // below checks the log output, not the exception.
            log.error("", e);
        }
        // Fail with a clear message rather than an IndexOutOfBoundsException
        // when the expected log entry never appeared.
        Assert.assertFalse("expected log entry not recorded: " + assertMsg.get(0),
                customLogs.getLogs().isEmpty());
        Assert.assertNotNull(customLogs.getLogs().get(0));
    } finally {
        // Detach the appender even when the assertion fails.
        customLogs.finished();
    }
}
@Test
public void settingConfigDifferentFromCLIWarns() {
    // Remember the original value so it can be restored even if the test fails.
    String oldVal = System.getProperty(ENABLE_WRITING_SINGLE_BLOB_INDEX_FILE_PARAM);
    final LogCustomizer custom = LogCustomizer
        .forLogger(BufferedOakDirectory.class.getName())
        .contains("Ignoring configuration ")
        .enable(Level.WARN).create();
    try {
        // Case 1: CLI says "true", setter says false -> expect one WARN.
        custom.starting();
        System.setProperty(ENABLE_WRITING_SINGLE_BLOB_INDEX_FILE_PARAM, "true");
        reReadCommandLineParam();
        BufferedOakDirectory.setEnableWritingSingleBlobIndexFile(false);
        assertEquals("Warn on conflicting config on CLI and set method", 1, custom.getLogs().size());
        custom.finished();
        // Case 2: CLI says "false", setter says true -> expect one WARN.
        custom.starting();
        System.setProperty(ENABLE_WRITING_SINGLE_BLOB_INDEX_FILE_PARAM, "false");
        reReadCommandLineParam();
        BufferedOakDirectory.setEnableWritingSingleBlobIndexFile(true);
        assertEquals("Warn on conflicting config on CLI and set method", 1, custom.getLogs().size());
    } finally {
        // Detach the appender and restore the system property regardless of
        // assertion outcome, so later tests see an unmodified environment.
        custom.finished();
        if (oldVal == null) {
            System.clearProperty(ENABLE_WRITING_SINGLE_BLOB_INDEX_FILE_PARAM);
        } else {
            System.setProperty(ENABLE_WRITING_SINGLE_BLOB_INDEX_FILE_PARAM, oldVal);
        }
    }
}
@Test
public void init01fail() {
    // Check the assumption FIRST: the original started the log appender before
    // the assumeTrue, so an assumption failure skipped the test while leaking
    // the attached appender (finished() in the finally was never reached).
    Assume.assumeTrue(ds instanceof RDBDataSourceWrapper);
    LogCustomizer logCustomizer = LogCustomizer.forLogger(RDBDocumentStore.class.getName()).enable(Level.INFO)
        .contains("Attempted to upgrade").create();
    logCustomizer.starting();
    RDBDataSourceWrapper wds = (RDBDataSourceWrapper) ds;
    // Force ALTER TABLE ... ADD COLUMN statements to fail so the upgrade is
    // attempted but cannot complete.
    wds.setFailAlterTableAddColumnStatements(true);
    RDBOptions op = new RDBOptions().tablePrefix("T01F").initialSchema(0).upgradeToSchema(1).dropTablesOnClose(true);
    RDBDocumentStore rdb = null;
    try {
        rdb = new RDBDocumentStore(this.ds, new DocumentMK.Builder(), op);
        RDBTableMetaData meta = rdb.getTable(Collection.NODES);
        assertEquals(op.getTablePrefix() + "_NODES", meta.getName());
        // Upgrade failed, so the table must still be on the old schema.
        assertFalse(meta.hasVersion());
        // One "Attempted to upgrade" entry per table.
        assertEquals("unexpected # of log entries: " + logCustomizer.getLogs(),
                RDBDocumentStore.getTableNames().size(), logCustomizer.getLogs().size());
        // The store must remain usable despite the failed upgrade.
        UpdateOp testInsert = new UpdateOp(Utils.getIdFromPath("/foo"), true);
        assertTrue(rdb.create(Collection.NODES, Collections.singletonList(testInsert)));
    } finally {
        wds.setFailAlterTableAddColumnStatements(false);
        logCustomizer.finished();
        if (rdb != null) {
            rdb.dispose();
        }
    }
}
@Test
public void queryIteratorConsumedTest() throws SQLException {
    insertTestResource(this.getClass().getName() + "." + name.getMethodName());
    // Capture the DEBUG entry written when a query completes ("Query on ...").
    LogCustomizer customLogs = LogCustomizer.forLogger(RDBDocumentStoreJDBC.class.getName()).enable(Level.DEBUG)
            .contains("Query on ").create();
    customLogs.starting();
    MyConnectionHandler ch = new MyConnectionHandler(super.rdbDataSource);
    RDBTableMetaData tmd = ((RDBDocumentStore) super.ds).getTable(Collection.NODES);
    List<QueryCondition> conditions = Collections.emptyList();
    try {
        Iterator<RDBRow> qi = jdbc.queryAsIterator(ch, tmd, null, null, RDBDocumentStore.EMPTY_KEY_PATTERN,
                conditions, Integer.MAX_VALUE, null);
        assertTrue(qi instanceof Closeable);
        // Opening the iterator borrows one connection.
        assertEquals(1, ch.cnt.get());
        while (qi.hasNext()) {
            qi.next();
        }
        // Fully consuming the iterator must return the connection.
        assertEquals(0, ch.cnt.get());
        assertEquals("should have a DEBUG level log entry", 1, customLogs.getLogs().size());
    } finally {
        // (Removed the dead "customLogs = null" assignment the original had here.)
        customLogs.finished();
    }
}
@Test
public void init22() {
    // Schema 2 -> 2: no upgrade takes place, so no "to DB level" messages
    // should be logged.
    LogCustomizer logs = LogCustomizer.forLogger(RDBDocumentStore.class.getName()).enable(Level.INFO)
            .contains("to DB level").create();
    logs.starting();
    RDBOptions options = new RDBOptions().tablePrefix("T22").initialSchema(2).upgradeToSchema(2)
            .dropTablesOnClose(true);
    RDBDocumentStore store = null;
    try {
        store = new RDBDocumentStore(this.ds, new DocumentMK.Builder(), options);
        RDBTableMetaData nodesMeta = store.getTable(Collection.NODES);
        assertEquals(options.getTablePrefix() + "_NODES", nodesMeta.getName());
        // Schema 2 tables carry both the version and split-docs columns.
        assertTrue(nodesMeta.hasVersion());
        assertTrue(nodesMeta.hasSplitDocs());
        assertEquals("unexpected # of log entries: " + logs.getLogs(), 0, logs.getLogs().size());
    } finally {
        logs.finished();
        if (store != null) {
            store.dispose();
        }
    }
}
@Test
public void init01() {
    // Upgrading schema 0 -> 1 should emit one "to DB level 1" entry per table.
    LogCustomizer logs = LogCustomizer.forLogger(RDBDocumentStore.class.getName()).enable(Level.INFO)
            .contains("to DB level 1").create();
    logs.starting();
    RDBOptions options = new RDBOptions().tablePrefix("T01").initialSchema(0).upgradeToSchema(1)
            .dropTablesOnClose(true);
    RDBDocumentStore store = null;
    try {
        store = new RDBDocumentStore(this.ds, new DocumentMK.Builder(), options);
        RDBTableMetaData nodesMeta = store.getTable(Collection.NODES);
        assertEquals(options.getTablePrefix() + "_NODES", nodesMeta.getName());
        // After the upgrade, the table must have the schema-1 version column.
        assertTrue(nodesMeta.hasVersion());
        assertEquals("unexpected # of log entries: " + logs.getLogs(),
                RDBDocumentStore.getTableNames().size(), logs.getLogs().size());
    } finally {
        logs.finished();
        if (store != null) {
            store.dispose();
        }
    }
}
@Test
public void init12() {
    // Upgrading schema 1 -> 2 should emit "to DB level 2" entries; the upgrade
    // runs a fixed number of statements per table.
    LogCustomizer logs = LogCustomizer.forLogger(RDBDocumentStore.class.getName()).enable(Level.INFO)
            .contains("to DB level 2").create();
    logs.starting();
    RDBOptions options = new RDBOptions().tablePrefix("T12").initialSchema(1).upgradeToSchema(2)
            .dropTablesOnClose(true);
    RDBDocumentStore store = null;
    try {
        store = new RDBDocumentStore(this.ds, new DocumentMK.Builder(), options);
        RDBTableMetaData nodesMeta = store.getTable(Collection.NODES);
        assertEquals(options.getTablePrefix() + "_NODES", nodesMeta.getName());
        // Schema 2 adds the split-docs column.
        assertTrue(nodesMeta.hasSplitDocs());
        int stmtsPerTable = 5;
        assertEquals("unexpected # of log entries: " + logs.getLogs(),
                stmtsPerTable * RDBDocumentStore.getTableNames().size(), logs.getLogs().size());
    } finally {
        logs.finished();
        if (store != null) {
            store.dispose();
        }
    }
}
@Test
public void logWarnWhenSeekingBackAfterRead() throws Exception {
    byte[] fileBytes = writeFile();
    // Record WARN output about backward seeks on the streaming index file.
    LogCustomizer logRecorder = LogCustomizer
        .forLogger(OakStreamingIndexFile.class.getName()).enable(Level.WARN)
        .contains("Seeking back on streaming index file").create();
    NodeBuilder fooBuilder = builder.child("foo");
    logRecorder.starting();
    try (OakStreamingIndexFile readFile = new OakStreamingIndexFile("foo", fooBuilder, "dirDetails",
            modeDependantBlobFactory.getNodeBuilderBlobFactory(fooBuilder))) {
        byte[] readBytes = new byte[fileBytes.length];
        readFile.readBytes(readBytes, 0, 10);
        assertEquals("Don't log for simple reads", 0, logRecorder.getLogs().size());
        readFile.seek(12);
        assertEquals("Don't log for forward seeks", 0, logRecorder.getLogs().size());
        // Seeking backwards (12 -> 2) is the case that must warn.
        readFile.seek(2);
        assertEquals("Log warning for backward seeks", 1, logRecorder.getLogs().size());
    } finally {
        // Previously outside any finally: a failed assertion or IO error left
        // the appender attached. Always detach it.
        logRecorder.finished();
    }
}
@Test
public void init11() {
    // Schema 1 -> 1: nothing to upgrade, so no "to DB level 1" messages.
    LogCustomizer logs = LogCustomizer.forLogger(RDBDocumentStore.class.getName()).enable(Level.INFO)
            .contains("to DB level 1").create();
    logs.starting();
    RDBOptions options = new RDBOptions().tablePrefix("T11").initialSchema(1).upgradeToSchema(1)
            .dropTablesOnClose(true);
    RDBDocumentStore store = null;
    try {
        store = new RDBDocumentStore(this.ds, new DocumentMK.Builder(), options);
        RDBTableMetaData nodesMeta = store.getTable(Collection.NODES);
        assertEquals(options.getTablePrefix() + "_NODES", nodesMeta.getName());
        // Schema 1 tables already have the version column.
        assertTrue(nodesMeta.hasVersion());
        assertEquals("unexpected # of log entries: " + logs.getLogs(), 0, logs.getLogs().size());
    } finally {
        logs.finished();
        if (store != null) {
            store.dispose();
        }
    }
}
@Test
public void checkMark() throws Exception {
    // Capture TRACE output of the mark phase.
    LogCustomizer customLogs = LogCustomizer
        .forLogger(MarkSweepGarbageCollector.class.getName())
        .enable(Level.TRACE)
        .filter(Level.TRACE)
        .create();
    DataStoreState state = setUp(10);
    log.info("{} blobs available : {}", state.blobsPresent.size(), state.blobsPresent);
    customLogs.starting();
    ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(10);
    String rootFolder = folder.newFolder().getAbsolutePath();
    try {
        MarkSweepGarbageCollector gcObj = init(0, executor, rootFolder);
        gcObj.collectGarbage(true);
    } finally {
        // Detach the appender and release the thread pool even if GC throws;
        // the original leaked both on failure.
        customLogs.finished();
        executor.shutdown();
    }
    assertBlobReferenceRecords(state.blobsPresent, rootFolder);
}
@Test
public void checkMark() throws Exception {
    String rootFolder = folder.newFolder().getAbsolutePath();
    // Capture TRACE output of the mark phase.
    LogCustomizer customLogs = LogCustomizer
        .forLogger(MarkSweepGarbageCollector.class.getName())
        .enable(Level.TRACE)
        .filter(Level.TRACE)
        .create();
    DataStoreState state = setUp(true, 10);
    log.info("{} blobs available : {}", state.blobsPresent.size(), state.blobsPresent);
    customLogs.starting();
    ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(10);
    try {
        MarkSweepGarbageCollector gcObj = init(0, executor, rootFolder);
        gcObj.collectGarbage(true);
    } finally {
        // Detach the appender and release the thread pool even if GC throws;
        // the original leaked both on failure.
        customLogs.finished();
        executor.shutdown();
    }
    assertBlobReferences(state.blobsPresent, rootFolder);
}
@Test
public void checkConsistencyPathLogging() throws Exception {
    String rootFolder = folder.newFolder().getAbsolutePath();
    // Capture TRACE output produced while checking consistency.
    LogCustomizer customLogs = LogCustomizer
        .forLogger(MarkSweepGarbageCollector.class.getName())
        .enable(Level.TRACE)
        .filter(Level.TRACE)
        .create();
    setUp(false);
    customLogs.starting();
    ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(10);
    try {
        // maxLastModifiedInterval of one day (86400 s) so recent blobs are ignored.
        MarkSweepGarbageCollector gcObj = init(86400, executor, rootFolder);
        gcObj.checkConsistency();
    } finally {
        // Detach the appender and release the thread pool even if the check
        // throws; the original leaked both on failure.
        customLogs.finished();
        executor.shutdown();
    }
    assertBlobReferenceRecords(2, rootFolder);
}
@Test
public void checkGcPathLogging() throws Exception {
    String rootFolder = folder.newFolder().getAbsolutePath();
    // Capture TRACE output produced during garbage collection.
    LogCustomizer customLogs = LogCustomizer
        .forLogger(MarkSweepGarbageCollector.class.getName())
        .enable(Level.TRACE)
        .filter(Level.TRACE)
        .create();
    setUp(false);
    customLogs.starting();
    ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(10);
    try {
        MarkSweepGarbageCollector gcObj = init(0, executor, rootFolder);
        gcObj.collectGarbage(true);
    } finally {
        // Detach the appender and release the thread pool even if GC throws;
        // the original leaked both on failure.
        customLogs.finished();
        executor.shutdown();
    }
    assertBlobReferenceRecords(1, rootFolder);
}
@Test public void testUpgradeCompromisedSerializedMap() throws IOException { // Close the init setup closer.close(); // Create pre-upgrade load File home = folder.newFolder(); File pendingUploadsFile = new File(home, DataStoreCacheUpgradeUtils.UPLOAD_MAP); createGibberishLoad(home, pendingUploadsFile); LogCustomizer lc = LogCustomizer.forLogger(DataStoreCacheUpgradeUtils.class.getName()) .filter(Level.WARN) .enable(Level.WARN) .create(); lc.starting(); // Start init(2, new TestStagingUploader(folder.newFolder()), home); assertThat(lc.getLogs().toString(), containsString("Error in reading pending uploads map")); }
@Test
public void testLogs1() {
    // A DEBUG-enabled customizer must capture the single debug message.
    LogCustomizer custom = LogCustomizer
        .forLogger("org.apache.jackrabbit.oak.commons.junit.LogCustomizerTest")
        .enable(Level.DEBUG).create();
    try {
        custom.starting();
        LOG.debug("test message");
        List<String> logs = custom.getLogs();
        // assertEquals reports expected vs. actual on failure, unlike the
        // original assertTrue(logs.size() == 1) which only says "false".
        assertEquals(1, logs.size());
        assertThat("logs were recorded by custom logger", logs.toString(),
                containsString("test message"));
    } finally {
        custom.finished();
    }
}
@Test
public void testLogs2() {
    // DEBUG is enabled but the filter only records INFO and above, so a
    // debug-level message must not be captured.
    LogCustomizer custom = LogCustomizer
        .forLogger("org.apache.jackrabbit.oak.commons.junit.LogCustomizerTest")
        .enable(Level.DEBUG).filter(Level.INFO).create();
    try {
        custom.starting();
        LOG.debug("test message");
        List<String> recorded = custom.getLogs();
        assertTrue(recorded.isEmpty());
    } finally {
        custom.finished();
    }
}
// Builds a LogCustomizer for AsyncIndexUpdate that records messages at exactly
// the given level, and returns it already started. Callers are responsible for
// invoking finished() on the returned instance.
private static LogCustomizer createLogCustomizer(Level level) {
    LogCustomizer customizer = LogCustomizer.forLogger(AsyncIndexUpdate.class.getName())
        .filter(level)
        .enable(level)
        .create();
    customizer.starting();
    return customizer;
}
@Before public void prepare() throws Exception { // Capture logs customLogs = LogCustomizer .forLogger(UploadStagingCache.class.getName()) .enable(Level.INFO) .filter(Level.INFO) .contains("Uploads in progress on close [0]") .create(); customLogs.starting(); super.prepare(); }