/**
 * Returns every captured log line that announces an upcoming blob deletion,
 * i.e. lines starting with {@code "Proceeding to delete ["}.
 */
private List<String> getDeleteMessages() {
    List<String> deletions = Lists.newArrayList();
    for (String line : logCustomizer.getLogs()) {
        if (line.startsWith("Proceeding to delete [")) {
            deletions.add(line);
        }
    }
    return deletions;
}
/**
 * Extracts the first capture group of {@code REGEX} from each captured log
 * line that matches, parsed as an integer.
 */
private Iterable<Integer> getUpdates() {
    List<Integer> counts = new ArrayList<>();
    Pattern pattern = Pattern.compile(REGEX);
    for (String line : logCustomizer.getLogs()) {
        Matcher matcher = pattern.matcher(line);
        if (matcher.find()) {
            counts.add(Integer.parseInt(matcher.group(1)));
        }
    }
    return counts;
}
/**
 * Tests whether all the s3 uploads finished.
 */
@Test
public void testAsyncUploadFinished() {
    try {
        // Exactly one upload-completion entry is expected in the log.
        assertEquals(1, customLogs.getLogs().size());
    } finally {
        // Detach the custom appender even when the assertion fails,
        // so it cannot leak into subsequent tests.
        customLogs.finished();
    }
}
@Test
public void multipleQueryRuns() {
    final int executions = 16;
    final int trackEvery = 5;
    // The deprecation message is traced once every 'trackEvery' executions.
    final int numTraces = executions / trackEvery;
    OrderedPropertyIndexProvider.setThreshold(trackEvery);
    List<String> expectedLogs = Collections.nCopies(numTraces, OrderedIndex.DEPRECATION_MESSAGE);
    custom.starting();
    try {
        for (int i = 0; i < executions; i++) {
            executeQuery("SELECT * FROM [oak:Unstructured]", SQL2);
        }
        assertThat(custom.getLogs(), is(expectedLogs));
    } finally {
        // Always detach the appender, even when the assertion above fails.
        custom.finished();
    }
}
}
@Test
public void testContainsMatch() {
    // Capture only log lines that contain the exact substring "Test Message".
    LogCustomizer customizer = LogCustomizer
            .forLogger("org.apache.jackrabbit.oak.commons.junit.LogCustomizerTest")
            .contains("Test Message")
            .create();
    try {
        customizer.starting();
        // Matching is case sensitive: none of these should be captured.
        LOG.info("test message");
        LOG.info("test message 1");
        LOG.info("1 test message");
        List<String> captured = customizer.getLogs();
        assertTrue(captured.isEmpty());
        // Any line containing the substring counts, regardless of surroundings.
        LOG.info("Test Message");
        assertEquals(1, captured.size());
        LOG.info("1Test Message");
        LOG.info("1Test Message2");
        LOG.info("1 Test Message");
        assertEquals(4, captured.size());
    } finally {
        customizer.finished();
    }
}
@Test
public void testExactMatch() {
    // Capture only log lines whose entire text equals "Test Message".
    LogCustomizer customizer = LogCustomizer
            .forLogger("org.apache.jackrabbit.oak.commons.junit.LogCustomizerTest")
            .exactlyMatches("Test Message")
            .create();
    try {
        customizer.starting();
        // Case and extra characters must prevent a match.
        LOG.info("test message");
        LOG.info("test message 1");
        LOG.info("1 test message");
        List<String> captured = customizer.getLogs();
        assertTrue(captured.isEmpty());
        // Only the exact text is captured.
        LOG.info("Test Message");
        assertEquals(1, captured.size());
    } finally {
        customizer.finished();
    }
}
@Test
public void singleQueryRun() {
    custom.starting();
    try {
        // A single query against the deprecated ordered index must log the
        // deprecation message exactly once.
        executeQuery("SELECT * FROM [oak:Unstructured]", SQL2);
        List<String> logs = custom.getLogs();
        assertEquals(1, logs.size());
        assertThat(logs, hasItem(OrderedIndex.DEPRECATION_MESSAGE));
    } finally {
        // Always detach the appender, even when an assertion fails.
        custom.finished();
    }
}
/**
 * Runs the DataStoreCommand with the given (intentionally incorrect)
 * arguments and asserts that a log entry containing {@code assertMsg.get(0)}
 * was emitted by {@code logger}.
 *
 * @param logger class whose logger is expected to emit the message
 */
public static void testIncorrectParams(List<String> argList, ArrayList<String> assertMsg, Class<?> logger) {
    LogCustomizer customLogs = LogCustomizer
            .forLogger(logger.getName())
            .enable(Level.INFO)
            .filter(Level.INFO)
            .contains(assertMsg.get(0))
            .create();
    customLogs.starting();
    try {
        DataStoreCommand cmd = new DataStoreCommand();
        try {
            // The command is expected to complain about the bad arguments;
            // any exception it throws is logged but not fatal to the check.
            cmd.execute(argList.toArray(new String[0]));
        } catch (Exception e) {
            log.error("", e);
        }
        // Fail with a clear assertion message instead of an
        // IndexOutOfBoundsException when nothing matched.
        Assert.assertFalse("expected a log entry containing: " + assertMsg.get(0),
                customLogs.getLogs().isEmpty());
        Assert.assertNotNull(customLogs.getLogs().get(0));
    } finally {
        // Detach the appender even when the assertions fail.
        customLogs.finished();
    }
}
@Test
public void noRunWhenClosed() throws Exception {
    NodeStore store = new MemoryNodeStore();
    IndexEditorProvider provider = new PropertyIndexEditorProvider();
    AsyncIndexUpdate async = new AsyncIndexUpdate("async", store, provider);
    async.run();
    async.close();
    LogCustomizer lc = createLogCustomizer(Level.WARN);
    try {
        // A closed updater must refuse to run and warn instead.
        async.run();
        assertEquals(1, lc.getLogs().size());
        assertThat(lc.getLogs().get(0), containsString("Could not acquire run permit"));
    } finally {
        // Clean up even when the assertions above fail.
        lc.finished();
        async.close();
    }
}
@Test public void testUpgradeCompromisedSerializedMap() throws IOException { // Close the init setup closer.close(); // Create pre-upgrade load File home = folder.newFolder(); File pendingUploadsFile = new File(home, DataStoreCacheUpgradeUtils.UPLOAD_MAP); createGibberishLoad(home, pendingUploadsFile); LogCustomizer lc = LogCustomizer.forLogger(DataStoreCacheUpgradeUtils.class.getName()) .filter(Level.WARN) .enable(Level.WARN) .create(); lc.starting(); // Start init(2, new TestStagingUploader(folder.newFolder()), home); assertThat(lc.getLogs().toString(), containsString("Error in reading pending uploads map")); }
@Test
public void testLogs1() {
    LogCustomizer custom = LogCustomizer
            .forLogger(
                    "org.apache.jackrabbit.oak.commons.junit.LogCustomizerTest")
            .enable(Level.DEBUG).create();
    try {
        custom.starting();
        LOG.debug("test message");
        List<String> logs = custom.getLogs();
        // assertEquals reports the actual size on failure,
        // unlike assertTrue(logs.size() == 1).
        assertEquals(1, logs.size());
        assertThat("logs were recorded by custom logger", logs.toString(),
                containsString("test message"));
    } finally {
        custom.finished();
    }
}
@Test
public void logWarnWhenSeekingBackAfterRead() throws Exception {
    byte[] fileBytes = writeFile();
    LogCustomizer logRecorder = LogCustomizer
            .forLogger(OakStreamingIndexFile.class.getName()).enable(Level.WARN)
            .contains("Seeking back on streaming index file").create();
    NodeBuilder fooBuilder = builder.child("foo");
    try (OakStreamingIndexFile readFile = new OakStreamingIndexFile("foo",
            fooBuilder, "dirDetails",
            modeDependantBlobFactory.getNodeBuilderBlobFactory(fooBuilder))) {
        logRecorder.starting();
        byte[] readBytes = new byte[fileBytes.length];
        readFile.readBytes(readBytes, 0, 10);
        assertEquals("Don't log for simple reads", 0, logRecorder.getLogs().size());
        readFile.seek(12);
        assertEquals("Don't log for forward seeks", 0, logRecorder.getLogs().size());
        readFile.seek(2);
        assertEquals("Log warning for backward seeks", 1, logRecorder.getLogs().size());
    } finally {
        // Previously ran only on success; detach the appender unconditionally
        // so a failed assertion cannot leak it into other tests.
        logRecorder.finished();
    }
}
@Test
public void testLogs2() {
    // DEBUG is enabled on the logger but the customizer filters at INFO,
    // so a DEBUG statement must not be captured.
    LogCustomizer customizer = LogCustomizer
            .forLogger(
                    "org.apache.jackrabbit.oak.commons.junit.LogCustomizerTest")
            .enable(Level.DEBUG).filter(Level.INFO).create();
    try {
        customizer.starting();
        LOG.debug("test message");
        List<String> captured = customizer.getLogs();
        assertTrue(captured.isEmpty());
    } finally {
        customizer.finished();
    }
}
@Test
public void warnOnQueueFull() throws RepositoryException, InterruptedException, ExecutionException {
    LogCustomizer customLogs = LogCustomizer.forLogger(ChangeProcessor.class.getName())
            .filter(Level.WARN)
            .contains(OBS_QUEUE_FULL_WARN)
            .create();
    observationManager.addEventListener(listener, NODE_ADDED, TEST_PATH, true, null, null, false);
    customLogs.starting();
    try {
        // Overfill the observation queue; the processor must warn about it.
        addNodeToFillObsQueue();
        assertTrue("Observation queue full warning must get logged",
                customLogs.getLogs().size() > 0);
    } finally {
        // finished() previously sat inside the try body and was skipped
        // when the assertion failed; run both cleanups unconditionally.
        customLogs.finished();
        observationManager.removeEventListener(listener);
    }
}
@Test
public void withIndexDefSingleNode() throws RepositoryException, CommitFailedException {
    NodeBuilder root = EMPTY_NODE.builder();
    createIndexDef(root);
    NodeState before = root.getNodeState();
    root.child("n1").setProperty(indexedProperty, "dead");
    NodeState after = root.getNodeState();
    custom.starting();
    try {
        // Committing a single indexed node must trace the deprecation once.
        root = hook.processCommit(before, after, CommitInfo.EMPTY).builder();
        assertEquals(1, custom.getLogs().size());
        assertThat(custom.getLogs(), hasItem(DEPRECATION_MESSAGE));
    } finally {
        // Always detach the appender, even when an assertion fails.
        custom.finished();
    }
    NodeBuilder b = root.getChildNode(IndexConstants.INDEX_DEFINITIONS_NODE_TYPE)
            .getChildNode(indexName).getChildNode(IndexConstants.INDEX_CONTENT_NODE_NAME);
    assertFalse("nothing should have been touched under the actual index", b.exists());
}
@Test
public void init12() {
    // Upgrading from schema 1 to 2 must log one "to DB level 2" statement
    // per upgrade SQL statement and table.
    LogCustomizer logCustomizer = LogCustomizer.forLogger(RDBDocumentStore.class.getName()).enable(Level.INFO)
            .contains("to DB level 2").create();
    logCustomizer.starting();
    RDBOptions options = new RDBOptions().tablePrefix("T12").initialSchema(1).upgradeToSchema(2).dropTablesOnClose(true);
    RDBDocumentStore store = null;
    try {
        store = new RDBDocumentStore(this.ds, new DocumentMK.Builder(), options);
        RDBTableMetaData nodesMeta = store.getTable(Collection.NODES);
        assertEquals(options.getTablePrefix() + "_NODES", nodesMeta.getName());
        assertTrue(nodesMeta.hasSplitDocs());
        // Schema 2 upgrade executes five DDL statements per table.
        int statementsPerTable = 5;
        assertEquals("unexpected # of log entries: " + logCustomizer.getLogs(),
                statementsPerTable * RDBDocumentStore.getTableNames().size(),
                logCustomizer.getLogs().size());
    } finally {
        logCustomizer.finished();
        if (store != null) {
            store.dispose();
        }
    }
}
@Test
public void withIndexMultipleNodes() throws RepositoryException, CommitFailedException {
    final int threshold = 5;
    final int nodes = 16;
    // One deprecation trace per 'threshold' indexed nodes (ceiling division).
    final int traces = 1 + (nodes - 1) / threshold;
    OrderedPropertyIndexEditorProvider.setThreshold(threshold);
    final List<String> expected = Collections.nCopies(traces, DEPRECATION_MESSAGE);
    NodeBuilder root = EMPTY_NODE.builder();
    createIndexDef(root);
    custom.starting();
    try {
        for (int i = 0; i < nodes; i++) {
            NodeState before = root.getNodeState();
            root.child("n" + i).setProperty(indexedProperty, "dead" + i);
            NodeState after = root.getNodeState();
            root = hook.processCommit(before, after, CommitInfo.EMPTY).builder();
        }
        assertThat(custom.getLogs(), is(expected));
    } finally {
        // Always detach the appender, even when the assertion fails.
        custom.finished();
    }
    assertFalse(root.getChildNode(INDEX_DEFINITIONS_NAME).getChildNode(indexName)
            .getChildNode(INDEX_CONTENT_NODE_NAME).exists());
}
@Test
public void init01() {
    // Upgrading from schema 0 to 1 must log exactly one "to DB level 1"
    // statement per table.
    LogCustomizer logCustomizer = LogCustomizer.forLogger(RDBDocumentStore.class.getName()).enable(Level.INFO)
            .contains("to DB level 1").create();
    logCustomizer.starting();
    RDBOptions options = new RDBOptions().tablePrefix("T01").initialSchema(0).upgradeToSchema(1).dropTablesOnClose(true);
    RDBDocumentStore store = null;
    try {
        store = new RDBDocumentStore(this.ds, new DocumentMK.Builder(), options);
        RDBTableMetaData nodesMeta = store.getTable(Collection.NODES);
        assertEquals(options.getTablePrefix() + "_NODES", nodesMeta.getName());
        assertTrue(nodesMeta.hasVersion());
        assertEquals("unexpected # of log entries: " + logCustomizer.getLogs(),
                RDBDocumentStore.getTableNames().size(), logCustomizer.getLogs().size());
    } finally {
        logCustomizer.finished();
        if (store != null) {
            store.dispose();
        }
    }
}
@Test
public void init11() {
    // Starting and staying at schema 1 requires no upgrade, so no
    // "to DB level 1" statements may be logged.
    LogCustomizer logCustomizer = LogCustomizer.forLogger(RDBDocumentStore.class.getName()).enable(Level.INFO)
            .contains("to DB level 1").create();
    logCustomizer.starting();
    RDBOptions options = new RDBOptions().tablePrefix("T11").initialSchema(1).upgradeToSchema(1).dropTablesOnClose(true);
    RDBDocumentStore store = null;
    try {
        store = new RDBDocumentStore(this.ds, new DocumentMK.Builder(), options);
        RDBTableMetaData nodesMeta = store.getTable(Collection.NODES);
        assertEquals(options.getTablePrefix() + "_NODES", nodesMeta.getName());
        assertTrue(nodesMeta.hasVersion());
        assertEquals("unexpected # of log entries: " + logCustomizer.getLogs(), 0,
                logCustomizer.getLogs().size());
    } finally {
        logCustomizer.finished();
        if (store != null) {
            store.dispose();
        }
    }
}
@Test
public void init22() {
    // Starting and staying at schema 2 requires no upgrade, so no
    // "to DB level" statements may be logged.
    LogCustomizer logCustomizer = LogCustomizer.forLogger(RDBDocumentStore.class.getName()).enable(Level.INFO)
            .contains("to DB level").create();
    logCustomizer.starting();
    RDBOptions options = new RDBOptions().tablePrefix("T" + "22").initialSchema(2).upgradeToSchema(2).dropTablesOnClose(true);
    RDBDocumentStore store = null;
    try {
        store = new RDBDocumentStore(this.ds, new DocumentMK.Builder(), options);
        RDBTableMetaData nodesMeta = store.getTable(Collection.NODES);
        assertEquals(options.getTablePrefix() + "_NODES", nodesMeta.getName());
        assertTrue(nodesMeta.hasVersion());
        assertTrue(nodesMeta.hasSplitDocs());
        assertEquals("unexpected # of log entries: " + logCustomizer.getLogs(), 0,
                logCustomizer.getLogs().size());
    } finally {
        logCustomizer.finished();
        if (store != null) {
            store.dispose();
        }
    }
}