/**
 * This tests that delete_delta_x_y dirs will be not produced during minor compaction if no input delete events.
 * See HIVE-20941.
 * @throws Exception
 */
@Test
public void testDeleteEventsCompaction() throws Exception {
  // Three separate inserts -> three insert deltas, and no delete events at all.
  int[][] firstRow = {{1, 2}};
  int[][] secondRow = {{2, 3}};
  int[][] thirdRow = {{3, 4}};
  runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) " + makeValuesClause(firstRow));
  runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) " + makeValuesClause(secondRow));
  runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) " + makeValuesClause(thirdRow));

  // Request a MINOR compaction directly through the txn store, then run worker + cleaner.
  TxnStore txnHandler = TxnUtils.getTxnStore(hiveConf);
  txnHandler.compact(new CompactionRequest("default", Table.ACIDTBL.name().toLowerCase(), CompactionType.MINOR));
  runWorker(hiveConf);
  runCleaner(hiveConf);

  // Since no delete events were present, the compactor must not emit any delete_delta_* dir.
  FileSystem fs = FileSystem.get(hiveConf);
  FileStatus[] statuses = fs.globStatus(
      new Path(TEST_WAREHOUSE_DIR + "/" + Table.ACIDTBL.name().toLowerCase() + "/*"));
  for (FileStatus status : statuses) {
    Assert.assertFalse(status.getPath().getName().startsWith(AcidUtils.DELETE_DELTA_PREFIX));
  }
}
runWorker(hiveConf);
runWorker(hiveConf);
/**
 * Runs a MINOR compaction after a delete and an update, and verifies both the
 * earlier MAJOR and this MINOR compaction end up in CLEANING state in history.
 */
@Test
public void testCompactWithDelete() throws Exception {
  int[][] rows = {{1, 2}, {3, 4}};
  runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) " + makeValuesClause(rows));
  runStatementOnDriver("alter table "+ Table.ACIDTBL + " compact 'MAJOR'");
  runWorker(hiveConf);
  // Produce delete and update events on top of the freshly compacted base.
  runStatementOnDriver("delete from " + Table.ACIDTBL + " where b = 4");
  runStatementOnDriver("update " + Table.ACIDTBL + " set b = -2 where b = 2");
  runStatementOnDriver("alter table "+ Table.ACIDTBL + " compact 'MINOR'");
  runWorker(hiveConf);
  TxnStore txnHandler = TxnUtils.getTxnStore(hiveConf);
  ShowCompactResponse resp = txnHandler.showCompact(new ShowCompactRequest());
  Assert.assertEquals("Unexpected number of compactions in history", 2, resp.getCompactsSize());
  // Both compactions should be waiting for the cleaner at this point.
  Assert.assertEquals("Unexpected 0 compaction state",
      TxnStore.CLEANING_RESPONSE, resp.getCompacts().get(0).getState());
  Assert.assertEquals("Unexpected 1 compaction state",
      TxnStore.CLEANING_RESPONSE, resp.getCompacts().get(1).getState());
}
TestTxnCommands2.runWorker(hiveConf);
/**
 * Reads an ACID table with the ETL ORC split strategy and predicate pushdown
 * enabled, after a MAJOR compaction, and checks the row comes back intact.
 */
@Test
public void testETLSplitStrategyForACID() throws Exception {
  hiveConf.setVar(HiveConf.ConfVars.HIVE_ORC_SPLIT_STRATEGY, "ETL");
  hiveConf.setBoolVar(HiveConf.ConfVars.HIVEOPTINDEXFILTER, true);
  runStatementOnDriver("insert into " + Table.ACIDTBL + " values(1,2)");
  runStatementOnDriver("alter table " + Table.ACIDTBL + " compact 'MAJOR'");
  runWorker(hiveConf);
  List<String> rows = runStatementOnDriver("select * from " + Table.ACIDTBL + " where a = 1");
  int[][] expected = {{1, 2}};
  Assert.assertEquals(stringifyValues(expected), rows);
}
/**
 * https://issues.apache.org/jira/browse/HIVE-17391
 * Transactional table created with an empty 'serialization.null.format'
 * tblproperty: insert, update and MAJOR compaction must all succeed.
 * @throws Exception
 */
@Test
public void testEmptyInTblproperties() throws Exception {
  runStatementOnDriver("create table t1 " + "(a int, b int) stored as orc TBLPROPERTIES ('serialization.null.format'='', 'transactional'='true')");
  runStatementOnDriver("insert into t1 " + "(a,b) values(1,7),(3,7)");
  // BUG FIX: the inserted rows all have b = 7; the original predicate "b = 2"
  // matched no rows, so the update never actually produced a delta to compact.
  runStatementOnDriver("update t1" + " set b = -2 where b = 7");
  runStatementOnDriver("alter table t1 " + " compact 'MAJOR'");
  runWorker(hiveConf);
  TxnStore txnHandler = TxnUtils.getTxnStore(hiveConf);
  ShowCompactResponse resp = txnHandler.showCompact(new ShowCompactRequest());
  Assert.assertEquals("Unexpected number of compactions in history", 1, resp.getCompactsSize());
  // The single MAJOR compaction should be waiting for the cleaner.
  Assert.assertEquals("Unexpected 0 compaction state", TxnStore.CLEANING_RESPONSE, resp.getCompacts().get(0).getState());
  Assert.assertTrue(resp.getCompacts().get(0).getHadoopJobId().startsWith("job_local"));
}
@Test public void testAcidWithSchemaEvolution() throws Exception { hiveConf.setVar(HiveConf.ConfVars.HIVE_ORC_SPLIT_STRATEGY, "ETL"); String tblName = "acidTblWithSchemaEvol"; runStatementOnDriver("drop table if exists " + tblName); runStatementOnDriver("CREATE TABLE " + tblName + "(a INT, b STRING) " + " CLUSTERED BY(a) INTO 2 BUCKETS" + //currently ACID requires table to be bucketed " STORED AS ORC TBLPROPERTIES ('transactional'='true')"); runStatementOnDriver("INSERT INTO " + tblName + " VALUES (1, 'foo'), (2, 'bar')"); // Major compact to create a base that has ACID schema. runStatementOnDriver("ALTER TABLE " + tblName + " COMPACT 'MAJOR'"); runWorker(hiveConf); // Alter table for perform schema evolution. runStatementOnDriver("ALTER TABLE " + tblName + " ADD COLUMNS(c int)"); // Validate there is an added NULL for column c. List<String> rs = runStatementOnDriver("SELECT * FROM " + tblName + " ORDER BY a"); String[] expectedResult = { "1\tfoo\tNULL", "2\tbar\tNULL" }; Assert.assertEquals(Arrays.asList(expectedResult), rs); } /**
/** * Make sure there's no FileSystem$Cache$Key leak due to UGI use * @throws Exception */ @Test public void testFileSystemUnCaching() throws Exception { int cacheSizeBefore; int cacheSizeAfter; // get the size of cache BEFORE cacheSizeBefore = getFileSystemCacheSize(); // Insert a row to ACID table runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) values(1,2)"); // Perform a major compaction runStatementOnDriver("alter table " + Table.ACIDTBL + " compact 'major'"); runWorker(hiveConf); runCleaner(hiveConf); // get the size of cache AFTER cacheSizeAfter = getFileSystemCacheSize(); Assert.assertEquals(cacheSizeBefore, cacheSizeAfter); }
/**
 * make sure Aborted txns don't red-flag a base_xxxx (HIVE-14350)
 */
@Test
public void testNoHistory() throws Exception {
  int[][] tableData = {{1,2},{3,4}};
  runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) " + makeValuesClause(tableData));
  // Simulate an aborted transaction: this insert is rolled back and must stay invisible.
  hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true);
  runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) " + makeValuesClause(tableData));
  hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, false);
  runStatementOnDriver("alter table "+ Table.ACIDTBL + " compact 'MAJOR'");
  runWorker(hiveConf);
  runCleaner(hiveConf);
  // BUG FIX: the original test discarded the query result and asserted nothing.
  // Only the 2 committed rows may survive the compaction + clean.
  List<String> rs = runStatementOnDriver("select count(*) from " + Table.ACIDTBL);
  Assert.assertEquals("2", rs.get(0));
}
/**
 * Test update that hits multiple partitions (i.e. requries dynamic partition insert to process)
 * @throws Exception
 */
@Test
public void updateDeletePartitioned() throws Exception {
  int[][] tableData = {{1,2},{3,4},{5,6}};
  runStatementOnDriver("insert into " + Table.ACIDTBLPART + " partition(p=1) (a,b) " + makeValuesClause(tableData));
  runStatementOnDriver("insert into " + Table.ACIDTBLPART + " partition(p=2) (a,b) " + makeValuesClause(tableData));
  TxnStore txnHandler = TxnUtils.getTxnStore(hiveConf);
  // CONSISTENCY FIX: the first request used Table.ACIDTBLPART.name() while the second
  // used .toString(); unify on toString(), the same form the SQL statements above
  // resolve to through string concatenation, so both requests target the same table.
  txnHandler.compact(new CompactionRequest("default", Table.ACIDTBLPART.toString(), CompactionType.MAJOR));
  runWorker(hiveConf);
  runCleaner(hiveConf);
  runStatementOnDriver("update " + Table.ACIDTBLPART + " set b = b + 1 where a = 3");
  txnHandler.compact(new CompactionRequest("default", Table.ACIDTBLPART.toString(), CompactionType.MAJOR));
  runWorker(hiveConf);
  runCleaner(hiveConf);
  List<String> rs = runStatementOnDriver("select p,a,b from " + Table.ACIDTBLPART + " order by p, a, b");
  int[][] expectedData = {{1,1,2},{1,3,5},{1,5,6},{2,1,2},{2,3,5},{2,5,6}};
  Assert.assertEquals("Update " + Table.ACIDTBLPART + " didn't match:", stringifyValues(expectedData), rs);
}
/**
 * Checks that ADD COLUMNS on an ACID table after a compaction works and that
 * data inserted both before and after the schema change reads back correctly.
 * @throws Exception
 */
@Test
public void testAlterTable() throws Exception {
  int[][] tableData = {{1,2}};
  runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) " + makeValuesClause(tableData));
  runStatementOnDriver("alter table "+ Table.ACIDTBL + " compact 'MAJOR'");
  runWorker(hiveConf);
  int[][] tableData2 = {{5,6}};
  runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) " + makeValuesClause(tableData2));
  List<String> rs1 = runStatementOnDriver("select a,b from " + Table.ACIDTBL + " where b > 0 order by a,b");
  // BUG FIX: the original test captured the query results but asserted nothing.
  Assert.assertEquals(stringifyValues(new int[][] {{1,2},{5,6}}), rs1);
  runStatementOnDriver("alter table " + Table.ACIDTBL + " add columns(c int)");
  int[][] moreTableData = {{7,8,9}};
  runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b,c) " + makeValuesClause(moreTableData));
  List<String> rs0 = runStatementOnDriver("select a,b,c from " + Table.ACIDTBL + " where a > 0 order by a,b,c");
  // Rows inserted before ADD COLUMNS carry NULL for c, so only the row count is checked here.
  Assert.assertEquals(3, rs0.size());
}
// @Ignore("not needed but useful for testing")
@Test public void testOriginalFileReaderWhenNonAcidConvertedToAcid() throws Exception { // 1. Insert five rows to Non-ACID table. runStatementOnDriver("insert into " + Table.NONACIDORCTBL + "(a,b) values(1,2),(3,4),(5,6),(7,8),(9,10)"); // 2. Convert NONACIDORCTBL to ACID table. //todo: remove trans_prop after HIVE-17089 runStatementOnDriver("alter table " + Table.NONACIDORCTBL + " SET TBLPROPERTIES ('transactional'='true', 'transactional_properties'='default')"); runStatementOnDriver("update " + Table.NONACIDORCTBL + " set b = b*2 where b in (4,10)"); runStatementOnDriver("delete from " + Table.NONACIDORCTBL + " where a = 7"); List<String> rs = runStatementOnDriver("select a,b from " + Table.NONACIDORCTBL + " order by a,b"); int[][] resultData = new int[][] {{1,2}, {3,8}, {5,6}, {9,20}}; Assert.assertEquals(stringifyValues(resultData), rs); // 3. Perform a major compaction. runStatementOnDriver("alter table "+ Table.NONACIDORCTBL + " compact 'MAJOR'"); runWorker(hiveConf); TxnStore txnHandler = TxnUtils.getTxnStore(hiveConf); ShowCompactResponse resp = txnHandler.showCompact(new ShowCompactRequest()); Assert.assertEquals("Unexpected number of compactions in history", 1, resp.getCompactsSize()); Assert.assertEquals("Unexpected 0 compaction state", TxnStore.CLEANING_RESPONSE, resp.getCompacts().get(0).getState()); Assert.assertTrue(resp.getCompacts().get(0).getHadoopJobId().startsWith("job_local")); // 3. Perform a delete. runStatementOnDriver("delete from " + Table.NONACIDORCTBL + " where a = 1"); rs = runStatementOnDriver("select a,b from " + Table.NONACIDORCTBL + " order by a,b"); resultData = new int[][] {{3,8}, {5,6}, {9,20}}; Assert.assertEquals(stringifyValues(resultData), rs); } /**
@Test public void testBucketCodec() throws Exception { d.destroy(); //insert data in "legacy" format hiveConf.setIntVar(HiveConf.ConfVars.TESTMODE_BUCKET_CODEC_VERSION, 0); d = new Driver(hiveConf); int[][] targetVals = {{2,1},{4,3},{5,6},{7,8}}; runStatementOnDriver("insert into " + Table.ACIDTBL + " " + makeValuesClause(targetVals)); d.destroy(); hiveConf.setIntVar(HiveConf.ConfVars.TESTMODE_BUCKET_CODEC_VERSION, 1); d = new Driver(hiveConf); //do some operations with new format runStatementOnDriver("update " + Table.ACIDTBL + " set b=11 where a in (5,7)"); runStatementOnDriver("insert into " + Table.ACIDTBL + " values(11,11)"); runStatementOnDriver("delete from " + Table.ACIDTBL + " where a = 7"); //make sure we get the right data back before/after compactions List<String> r = runStatementOnDriver("select a,b from " + Table.ACIDTBL + " order by a,b"); int[][] rExpected = {{2,1},{4,3},{5,11},{11,11}}; Assert.assertEquals(stringifyValues(rExpected), r); runStatementOnDriver("ALTER TABLE " + Table.ACIDTBL + " COMPACT 'MINOR'"); runWorker(hiveConf); r = runStatementOnDriver("select a,b from " + Table.ACIDTBL + " order by a,b"); Assert.assertEquals(stringifyValues(rExpected), r); runStatementOnDriver("ALTER TABLE " + Table.ACIDTBL + " COMPACT 'MAJOR'"); runWorker(hiveConf); r = runStatementOnDriver("select a,b from " + Table.ACIDTBL + " order by a,b"); Assert.assertEquals(stringifyValues(rExpected), r); } /**
TestTxnCommands2.runWorker(hiveConf); String[][] expected3 = new String[][] { {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t2",
TestTxnCommands2.runWorker(hiveConf); String[][] expected2 = new String[][] { {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/base_0000001_v0000023/bucket_00000"},
TestTxnCommands2.runWorker(hiveConf);
TestTxnCommands2.runWorker(hiveConf);