@Test
public void gcCheckDeletedSize() throws Exception {
    log.info("Starting gcCheckDeletedSize()");

    // Capture logs for the second round of gc
    LogCustomizer customLogs = LogCustomizer
        .forLogger(MarkSweepGarbageCollector.class.getName())
        .enable(Level.INFO)
        .filter(Level.INFO)
        .contains("Estimated size recovered for")
        .create();
    customLogs.starting();

    Set<String> existingAfterGC = executeGarbageCollection(cluster, cluster.getCollector(0), false);
    assertEquals(1, customLogs.getLogs().size());

    long deletedSize = (cluster.blobStoreState.blobsAdded.size()
        - cluster.blobStoreState.blobsPresent.size()) * 100;
    assertTrue(customLogs.getLogs().get(0).contains(String.valueOf(deletedSize)));
    assertStats(cluster.statsProvider, 1, 0,
        cluster.blobStoreState.blobsAdded.size() - cluster.blobStoreState.blobsPresent.size(),
        cluster.blobStoreState.blobsAdded.size() - cluster.blobStoreState.blobsPresent.size(), NAME);
    assertEquals(deletedSize, getStatCount(cluster.statsProvider, NAME, TOTAL_SIZE_DELETED));
    customLogs.finished();

    assertTrue(Sets.symmetricDifference(cluster.blobStoreState.blobsPresent, existingAfterGC).isEmpty());
}
public static void testIncorrectParams(List<String> argList, ArrayList<String> assertMsg, Class<?> logger) {
    LogCustomizer customLogs = LogCustomizer
        .forLogger(logger.getName())
        .enable(Level.INFO)
        .filter(Level.INFO)
        .contains(assertMsg.get(0))
        .create();
    customLogs.starting();

    DataStoreCommand cmd = new DataStoreCommand();
    try {
        cmd.execute(argList.toArray(new String[0]));
    } catch (Exception e) {
        log.error("", e);
    }

    Assert.assertNotNull(customLogs.getLogs().get(0));
    customLogs.finished();
}
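A hedged usage sketch of the helper above; the command-line flag and the expected message are hypothetical placeholders (not real oak-run options), and DataStoreCommand.class is assumed to be the class whose logger emits the message:

// Hypothetical invocation (flag and message are placeholders);
// testIncorrectParams() asserts that a log line containing
// assertMsg.get(0) was captured while the command ran.
@Test
public void incorrectParamsExample() {
    List<String> argList = new ArrayList<>(Arrays.asList("--fake-option"));
    ArrayList<String> assertMsg = new ArrayList<>(Arrays.asList("Missing required option"));
    testIncorrectParams(argList, assertMsg, DataStoreCommand.class);
}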
@Test
public void checkMark() throws Exception {
    LogCustomizer customLogs = LogCustomizer
        .forLogger(MarkSweepGarbageCollector.class.getName())
        .enable(Level.TRACE)
        .filter(Level.TRACE)
        .create();
    DataStoreState state = setUp(10);
    log.info("{} blobs available : {}", state.blobsPresent.size(), state.blobsPresent);
    customLogs.starting();

    ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(10);
    String rootFolder = folder.newFolder().getAbsolutePath();
    MarkSweepGarbageCollector gcObj = init(0, executor, rootFolder);
    gcObj.collectGarbage(true);
    customLogs.finished();
    assertBlobReferenceRecords(state.blobsPresent, rootFolder);
}
.forLogger(MarkSweepGarbageCollector.class.getName())
.enable(Level.WARN)
.filter(Level.WARN)
.contains("Error occurred while deleting blob with id")
.create();
@Test
public void warnOnRepeatedQueueFull() throws RepositoryException, InterruptedException, ExecutionException {
    LogCustomizer warnLogs = LogCustomizer.forLogger(ChangeProcessor.class.getName())
        .filter(Level.WARN)
        .contains(OBS_QUEUE_FULL_WARN)
        .create();
    LogCustomizer debugLogs = LogCustomizer.forLogger(ChangeProcessor.class.getName())
        .filter(Level.DEBUG)
        .contains(OBS_QUEUE_FULL_WARN)
        .create();
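The snippet above is cut off before the test body. A hedged sketch of how the paired customizers could be driven; the control flow below is an assumption based on the test name, reusing addNodeToFillObsQueue() from the warnOnQueueFull test further down:

// Hedged continuation sketch (assumption, not the original test body):
// a first overflow should log the warning at WARN, a repeated overflow
// presumably only at DEBUG.
warnLogs.starting();
debugLogs.starting();
try {
    addNodeToFillObsQueue();
    assertTrue("First queue-full warning expected at WARN", warnLogs.getLogs().size() > 0);
    addNodeToFillObsQueue();
    assertTrue("Repeated queue-full message expected at DEBUG", debugLogs.getLogs().size() > 0);
} finally {
    warnLogs.finished();
    debugLogs.finished();
}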
.filter(Level.WARN)
.create();
@Test
public void reindexForDisabledIndexes() throws Exception {
    EditorHook hook = new EditorHook(
        new IndexUpdateProvider(new CompositeIndexEditorProvider(
            new PropertyIndexEditorProvider(), new ReferenceEditorProvider())));

    NodeState before = builder.getNodeState();
    createIndexDefinition(builder.child(INDEX_DEFINITIONS_NAME), "fooIndex",
        true, false, ImmutableSet.of("foo"), null);
    builder.child("testRoot").setProperty("foo", "abc");
    NodeState after = builder.getNodeState();
    NodeState indexed = hook.processCommit(before, after, CommitInfo.EMPTY);

    before = indexed;
    builder = before.builder();
    builder.getChildNode("oak:index").getChildNode("fooIndex")
        .setProperty(TYPE_PROPERTY_NAME, TYPE_DISABLED);
    builder.getChildNode("oak:index").getChildNode("fooIndex")
        .setProperty(REINDEX_PROPERTY_NAME, true);
    after = builder.getNodeState();

    LogCustomizer customLogs = LogCustomizer.forLogger(IndexUpdate.class.getName())
        .filter(Level.INFO)
        .create();
    customLogs.starting();

    before = after;
    builder = before.builder();
    builder.child("testRoot2").setProperty("foo", "abc");
    after = builder.getNodeState();
    indexed = hook.processCommit(before, after, CommitInfo.EMPTY);

    assertTrue(customLogs.getLogs().isEmpty());
    customLogs.finished();
}
.filter(Level.WARN)
.create();
lc.starting();
@Test
public void perfLog() throws Exception {
    String logName = DocumentStoreStats.class.getName() + ".perf";
    LogCustomizer customLogs = LogCustomizer.forLogger(logName)
        .filter(Level.TRACE)
        .create();
    enableLevel(logName, Level.INFO);
    customLogs.starting();

    // No logs until debug is enabled
    stats.doneFindAndModify(100, Collection.NODES, "foo", true, true, 0);
    assertEquals(0, customLogs.getLogs().size());

    stats.doneFindAndModify(TimeUnit.SECONDS.toNanos(10), Collection.NODES, "foo", true, true, 0);
    assertEquals(0, customLogs.getLogs().size());

    // Change level to DEBUG - now the threshold rule applies
    enableLevel(logName, Level.DEBUG);
    stats.doneFindAndModify(100, Collection.NODES, "foo", true, true, 0);
    assertEquals(0, customLogs.getLogs().size());

    stats.doneFindAndModify(TimeUnit.SECONDS.toNanos(10), Collection.NODES, "foo", true, true, 0);
    assertEquals(1, customLogs.getLogs().size());

    // With TRACE level everything is logged
    enableLevel(logName, Level.TRACE);
    stats.doneFindAndModify(100, Collection.NODES, "foo", true, true, 0);
    assertEquals(2, customLogs.getLogs().size());

    customLogs.finished();
}
@Test
public void checkMark() throws Exception {
    String rootFolder = folder.newFolder().getAbsolutePath();
    LogCustomizer customLogs = LogCustomizer
        .forLogger(MarkSweepGarbageCollector.class.getName())
        .enable(Level.TRACE)
        .filter(Level.TRACE)
        .create();
    DataStoreState state = setUp(true, 10);
    log.info("{} blobs available : {}", state.blobsPresent.size(), state.blobsPresent);
    customLogs.starting();

    ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(10);
    MarkSweepGarbageCollector gcObj = init(0, executor, rootFolder);
    gcObj.collectGarbage(true);
    customLogs.finished();
    assertBlobReferences(state.blobsPresent, rootFolder);
}
@Test
public void checkConsistencyPathLogging() throws Exception {
    String rootFolder = folder.newFolder().getAbsolutePath();
    LogCustomizer customLogs = LogCustomizer
        .forLogger(MarkSweepGarbageCollector.class.getName())
        .enable(Level.TRACE)
        .filter(Level.TRACE)
        .create();
    setUp(false);
    customLogs.starting();

    ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(10);
    MarkSweepGarbageCollector gcObj = init(86400, executor, rootFolder);
    gcObj.checkConsistency();
    customLogs.finished();
    assertBlobReferenceRecords(2, rootFolder);
}
@Test
public void testUpgradeCompromisedSerializedMap() throws IOException {
    // Close the init setup
    closer.close();

    // Create pre-upgrade load
    File home = folder.newFolder();
    File pendingUploadsFile = new File(home, DataStoreCacheUpgradeUtils.UPLOAD_MAP);
    createGibberishLoad(home, pendingUploadsFile);

    LogCustomizer lc = LogCustomizer.forLogger(DataStoreCacheUpgradeUtils.class.getName())
        .filter(Level.WARN)
        .enable(Level.WARN)
        .create();
    lc.starting();

    // Start
    init(2, new TestStagingUploader(folder.newFolder()), home);

    assertThat(lc.getLogs().toString(), containsString("Error in reading pending uploads map"));
}
@Test
public void testLogs2() {
    LogCustomizer custom = LogCustomizer
        .forLogger("org.apache.jackrabbit.oak.commons.junit.LogCustomizerTest")
        .enable(Level.DEBUG)
        .filter(Level.INFO)
        .create();
    try {
        custom.starting();
        LOG.debug("test message");
        List<String> logs = custom.getLogs();
        assertTrue(logs.isEmpty());
    } finally {
        custom.finished();
    }
}
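testLogs2 shows that enable(Level.DEBUG) only controls what the logger emits, while filter(Level.INFO) drops the DEBUG event from the capture. For contrast, a minimal hedged sketch of the capturing case (an assumption mirroring testLogs2, not part of the original test class): with the filter lowered to DEBUG, the same message should be recorded.

// Hedged companion sketch: filter(Level.DEBUG) lets the DEBUG event
// through, so exactly one log line is captured.
@Test
public void testLogs2Captured() {
    LogCustomizer custom = LogCustomizer
        .forLogger("org.apache.jackrabbit.oak.commons.junit.LogCustomizerTest")
        .enable(Level.DEBUG)
        .filter(Level.DEBUG)
        .create();
    try {
        custom.starting();
        LOG.debug("test message");
        assertEquals(1, custom.getLogs().size());
    } finally {
        custom.finished();
    }
}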
@Test
public void warnOnQueueFull() throws RepositoryException, InterruptedException, ExecutionException {
    LogCustomizer customLogs = LogCustomizer.forLogger(ChangeProcessor.class.getName())
        .filter(Level.WARN)
        .contains(OBS_QUEUE_FULL_WARN)
        .create();

    observationManager.addEventListener(listener, NODE_ADDED, TEST_PATH, true, null, null, false);
    try {
        customLogs.starting();
        addNodeToFillObsQueue();
        assertTrue("Observation queue full warning must get logged",
            customLogs.getLogs().size() > 0);
        customLogs.finished();
    } finally {
        observationManager.removeEventListener(listener);
    }
}
@Test
public void checkGcPathLogging() throws Exception {
    String rootFolder = folder.newFolder().getAbsolutePath();
    LogCustomizer customLogs = LogCustomizer
        .forLogger(MarkSweepGarbageCollector.class.getName())
        .enable(Level.TRACE)
        .filter(Level.TRACE)
        .create();
    setUp(false);
    customLogs.starting();

    ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(10);
    MarkSweepGarbageCollector gcObj = init(0, executor, rootFolder);
    gcObj.collectGarbage(true);
    customLogs.finished();
    assertBlobReferenceRecords(1, rootFolder);
}
@Before
public void prepare() throws Exception {
    // Capture logs
    customLogs = LogCustomizer
        .forLogger(UploadStagingCache.class.getName())
        .enable(Level.INFO)
        .filter(Level.INFO)
        .contains("Uploads in progress on close [0]")
        .create();
    customLogs.starting();
    super.prepare();
}
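The @Before above starts the customizer but the matching teardown is not shown. A hedged sketch of what the counterpart could look like; the method name and the expectation of exactly one matching line are assumptions, not the original teardown:

// Hedged teardown sketch (assumption): verify the expected close message
// was captured, then always detach the appender.
@After
public void checkLogsOnClose() {
    try {
        assertEquals(1, customLogs.getLogs().size());
    } finally {
        customLogs.finished();
    }
}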
private static LogCustomizer createLogCustomizer(Level level) {
    LogCustomizer lc = LogCustomizer.forLogger(AsyncIndexUpdate.class.getName())
        .filter(level)
        .enable(level)
        .create();
    lc.starting();
    return lc;
}
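Because the helper returns an already-started customizer, the caller owns the cleanup. A minimal hedged usage sketch (the try/finally shape is an assumption; the step that would trigger logging is deliberately elided):

// Hedged caller sketch: finished() must run even if an assertion fails,
// otherwise the appender stays attached to the AsyncIndexUpdate logger.
LogCustomizer lc = createLogCustomizer(Level.ERROR);
try {
    // ... code under test that is expected to log at ERROR would run here ...
    assertTrue(lc.getLogs().isEmpty()); // nothing logged yet in this sketch
} finally {
    lc.finished();
}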