/**
 * Verifies that minor compaction does not produce delete_delta_x_y directories
 * when the compaction input contains no delete events. See HIVE-20941.
 * @throws Exception
 */
@Test
public void testDeleteEventsCompaction() throws Exception {
  // Three separate inserts -> three insert deltas, and no delete events at all.
  int[][] rows1 = {{1, 2}};
  int[][] rows2 = {{2, 3}};
  int[][] rows3 = {{3, 4}};
  runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) " + makeValuesClause(rows1));
  runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) " + makeValuesClause(rows2));
  runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) " + makeValuesClause(rows3));

  // Request a MINOR compaction and push it through the worker and the cleaner.
  TxnStore txnHandler = TxnUtils.getTxnStore(hiveConf);
  txnHandler.compact(
      new CompactionRequest("default", Table.ACIDTBL.name().toLowerCase(), CompactionType.MINOR));
  runWorker(hiveConf);
  runCleaner(hiveConf);

  // No directory under the table location may carry the delete_delta prefix.
  FileSystem fs = FileSystem.get(hiveConf);
  FileStatus[] statuses = fs.globStatus(
      new Path(TEST_WAREHOUSE_DIR + "/" + Table.ACIDTBL.name().toLowerCase() + "/*"));
  for (FileStatus status : statuses) {
    Assert.assertFalse(status.getPath().getName().startsWith(AcidUtils.DELETE_DELTA_PREFIX));
  }
}
runStatementOnDriver("select a,b from " + tblName + " order by a")); runCleaner(hiveConf);
runCleaner(hiveConf); // Cleaner would remove the obsolete files.
assertExpectedFileSet(expectedFiles, getWarehouseDir() + "/nobuckets"); TestTxnCommands2.runCleaner(hiveConf); rs = runStatementOnDriver("select c1, c2, c3 from nobuckets order by c1, c2, c3"); int[][] result = {{0,0,17},{1,1,17},{2,2,2},{3,3,3}};
runCleaner(hiveConf);
runCleaner(hiveConf);
runCleaner(hiveConf);
runCleaner(hiveConf);
runCleaner(hiveConf);
/** * Make sure there's no FileSystem$Cache$Key leak due to UGI use * @throws Exception */ @Test public void testFileSystemUnCaching() throws Exception { int cacheSizeBefore; int cacheSizeAfter; // get the size of cache BEFORE cacheSizeBefore = getFileSystemCacheSize(); // Insert a row to ACID table runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) values(1,2)"); // Perform a major compaction runStatementOnDriver("alter table " + Table.ACIDTBL + " compact 'major'"); runWorker(hiveConf); runCleaner(hiveConf); // get the size of cache AFTER cacheSizeAfter = getFileSystemCacheSize(); Assert.assertEquals(cacheSizeBefore, cacheSizeAfter); }
/**
 * Make sure aborted txns don't red-flag a base_xxxx (HIVE-14350).
 */
@Test
public void testNoHistory() throws Exception {
  int[][] data = {{1,2},{3,4}};
  // One committed insert...
  runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) " + makeValuesClause(data));
  // ...and one rolled back via the test-mode hook, leaving an aborted txn behind.
  hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true);
  runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) " + makeValuesClause(data));
  hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, false);

  // Major compaction must still produce a readable base despite the aborted txn.
  runStatementOnDriver("alter table "+ Table.ACIDTBL + " compact 'MAJOR'");
  runWorker(hiveConf);
  runCleaner(hiveConf);
  runStatementOnDriver("select count(*) from " + Table.ACIDTBL);
}
/**
 * Test update that hits multiple partitions (i.e. requires dynamic partition insert to process)
 * @throws Exception
 */
@Test
public void updateDeletePartitioned() throws Exception {
  int[][] tableData = {{1,2},{3,4},{5,6}};
  // Load identical rows into two partitions so the update below touches both.
  runStatementOnDriver("insert into " + Table.ACIDTBLPART + " partition(p=1) (a,b) " + makeValuesClause(tableData));
  runStatementOnDriver("insert into " + Table.ACIDTBLPART + " partition(p=2) (a,b) " + makeValuesClause(tableData));
  TxnStore txnHandler = TxnUtils.getTxnStore(hiveConf);
  // NOTE(review): this request passes Table.ACIDTBLPART.name() while the second
  // compaction below passes .toString() — the two forms may differ in case;
  // confirm which spelling the metastore expects and make them consistent.
  txnHandler.compact(new CompactionRequest("default", Table.ACIDTBLPART.name(), CompactionType.MAJOR));
  runWorker(hiveConf);
  runCleaner(hiveConf);
  // Update a=3 in both partitions (exercises the dynamic-partition write path).
  runStatementOnDriver("update " + Table.ACIDTBLPART + " set b = b + 1 where a = 3");
  txnHandler.compact(new CompactionRequest("default", Table.ACIDTBLPART.toString(), CompactionType.MAJOR));
  runWorker(hiveConf);
  runCleaner(hiveConf);
  List<String> rs = runStatementOnDriver("select p,a,b from " + Table.ACIDTBLPART + " order by p, a, b");
  // b bumped 4 -> 5 for a=3 in both partitions; everything else unchanged.
  int[][] expectedData = {{1,1,2},{1,3,5},{1,5,6},{2,1,2},{2,3,5},{2,5,6}};
  Assert.assertEquals("Update " + Table.ACIDTBLPART + " didn't match:", stringifyValues(expectedData), rs);
}
TestTxnCommands2.runCleaner(hiveConf); actualList = fs.listStatus(new Path(warehousePath + "/t"), FileUtils.HIDDEN_FILES_PATH_FILTER);
runCleaner(hiveConf); verifyDirAndResult(2);
txnHandler.compact(new CompactionRequest("default", Table.ACIDTBL.name().toLowerCase(), CompactionType.MAJOR)); runWorker(hiveConf); runCleaner(hiveConf); txnHandler.cleanTxnToWriteIdTable(); runCleaner(hiveConf); txnHandler.cleanEmptyAbortedTxns(); txnHandler.cleanTxnToWriteIdTable();
hiveConf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_ATTEMPTED)+ 1), countCompacts(txnHandler)); runCleaner(hiveConf); // transition to Success state compactionHistoryService.run(); checkCompactionState(new CompactionsByState(
count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t11' and CQ_STATE='r' and CQ_TYPE='i'"); Assert.assertEquals(1, count); TestTxnCommands2.runCleaner(conf); count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t11'"); Assert.assertEquals(0, count); count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t12p' and CQ_PARTITION='ds=tomorrow/hour=2' and CQ_STATE='r' and CQ_TYPE='i'"); Assert.assertEquals(1, count); TestTxnCommands2.runCleaner(conf); count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t12p'"); Assert.assertEquals(0, count);