/**
 * We have to use a different query to check results for Vectorized tests because to get the
 * file name info we need to use {@link org.apache.hadoop.hive.ql.metadata.VirtualColumn#FILENAME}
 * which will currently make the query non-vectorizable. This means we can't check the file name
 * for vectorized version of the test.
 */
protected void checkResult(String[][] expectedResult, String query, boolean isVectorized,
    String msg, Logger LOG) throws Exception{
  List<String> rs = runStatementOnDriver(query);
  // " vect" suffix tags vectorized-run failures in assertion messages; file names
  // are only checked on the non-vectorized run (see javadoc above)
  checkExpected(rs, expectedResult, msg + (isVectorized ? " vect" : ""), LOG, !isVectorized);
  // presumably verifies the plan matches the requested vectorization mode —
  // TODO confirm against the assertVectorized helper (not visible in this chunk)
  assertVectorized(isVectorized, query);
}

// NOTE(review): definition continues beyond this chunk — body not visible here.
void dropTable(String[] tabs) throws Exception {
/**
 * Returns the warehouse directory for this test, rooted under the per-test
 * data directory.
 */
protected String getWarehouseDir() {
  final String base = getTestDataDir();
  return base + "/warehouse";
}

/** Root scratch-data directory for the test; supplied by concrete subclasses. */
protected abstract String getTestDataDir();
/** Runs one Cleaner cycle; delegates to the shared base-test helper. */
public static void runCleaner(HiveConf hiveConf) throws Exception {
  TxnCommandsBaseForTests.runCleaner(hiveConf);
}

// NOTE(review): definition continues beyond this chunk — body not visible here.
public static void runInitiator(HiveConf hiveConf) throws Exception {
// Common per-test setup: configures HiveConf, (re)initializes the txn metastore
// DB and warehouse directory, builds a Driver, and creates the standard tables.
void setUpInternal() throws Exception {
  initHiveConf();
  // NOTE(review): workDir appears unused in the visible portion of this method —
  // confirm against the original file whether later statements consume it.
  Path workDir = new Path(System.getProperty("test.tmp.dir",
      "target" + File.separator + "test" + File.separator + "tmp"));
  // disable pre/post execution hooks so tests run without extra side effects
  hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
  hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
  hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, getWarehouseDir());
  hiveConf.setVar(HiveConf.ConfVars.HIVEINPUTFORMAT, HiveInputFormat.class.getName());
  // NOTE(review): stray dangling "hiveConf" token below — one or more
  // hiveConf.set*/setVar calls were apparently lost when this chunk was
  // extracted; restore from the original source.
  hiveConf
  TxnDbUtil.setConfValues(hiveConf);
  TxnDbUtil.prepDb(hiveConf);
  File f = new File(getWarehouseDir());
  if (f.exists()) {
    FileUtil.fullyDelete(f);
  // NOTE(review): the closing brace for the if-block above is missing in this
  // chunk, and the mkdirs/throw block below is likewise unbalanced — the brace
  // structure here is corrupted; verify against the original file.
  if (!(new File(getWarehouseDir()).mkdirs())) {
    throw new RuntimeException("Could not create " + getWarehouseDir());
  // nonIsolated(): share query state across statements run by this Driver
  d = new Driver(new QueryState.Builder().withHiveConf(hiveConf).nonIsolated().build(), null);
  d.setMaxRows(10000);
  dropTables();
  runStatementOnDriver("create table " + Table.ACIDTBL + "(a int, b int) clustered by (a) into " + BUCKET_COUNT + " buckets stored as orc TBLPROPERTIES ('transactional'='true')");
  runStatementOnDriver("create table " + Table.ACIDTBLPART + "(a int, b int) partitioned by (p string) clustered by (a) into " + BUCKET_COUNT + " buckets stored as orc TBLPROPERTIES ('transactional'='true')");
  runStatementOnDriver("create table " + Table.NONACIDORCTBL + "(a int, b int) clustered by (a) into " + BUCKET_COUNT + " buckets stored as orc TBLPROPERTIES ('transactional'='false')");
  runStatementOnDriver("create table " + Table.NONACIDORCTBL2 + "(a int, b int) clustered by (a) into " + BUCKET_COUNT + " buckets stored as orc TBLPROPERTIES ('transactional'='false')");
  runStatementOnDriver("create temporary table " + Table.ACIDTBL2 + "(a int, b int, c int) clustered by (c) into " + BUCKET_COUNT + " buckets stored as orc TBLPROPERTIES ('transactional'='true')");
  runStatementOnDriver("create table " + Table.NONACIDNONBUCKET + "(a int, b int) stored as orc TBLPROPERTIES ('transactional'='false')");
  // NOTE(review): method is not closed in this chunk — definition continues
  // beyond the visible source.
/**
 * Per-test cleanup: drops the test tables and tears down the Driver, then
 * always resets the txn metastore DB and deletes the test data directory.
 */
@After
public void tearDown() throws Exception {
  try {
    if (d != null) {
      dropTables();
      d.close();
      d.destroy();
      d = null;
    }
  } finally {
    // runs even if dropTables()/close() threw, so the next test starts clean
    TxnDbUtil.cleanDb(hiveConf);
    FileUtils.deleteDirectory(new File(getTestDataDir()));
  }
}

// NOTE(review): definition continues beyond this chunk — body not visible here.
protected String getWarehouseDir() {
/**
 * Convenience overload: forwards to the five-argument checkExpected with this
 * class's LOG and file-name verification enabled.
 */
private void checkExpected(List<String> rs, String[][] expected, String msg) {
  super.checkExpected(rs, expected, msg, LOG, true);
}
// NOTE(review): the javadoc opened below continues beyond this chunk.
/**
/** Runs one compaction Worker cycle synchronously via runCompactorThread. */
public static void runWorker(HiveConf hiveConf) throws Exception {
  runCompactorThread(hiveConf, CompactorThreadType.WORKER);
}

// NOTE(review): definition continues beyond this chunk — body not visible here.
public static void runCleaner(HiveConf hiveConf) throws Exception {
/** Runs one Initiator cycle; delegates to the shared base-test helper. */
public static void runInitiator(HiveConf hiveConf) throws Exception {
  TxnCommandsBaseForTests.runInitiator(hiveConf);
}
// NOTE(review): the javadoc opened below continues beyond this chunk.
/**
@Override void initHiveConf() { super.initHiveConf(); //TestTxnCommandsWithSplitUpdateAndVectorization has the vectorized version //of these tests. hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED, false); }
/**
 * Verifies query output against the expected rows: checks the row count, that
 * each row starts with the expected data prefix, and (optionally) that each
 * row's file-name suffix matches expected[i][1] (exact suffix or regex).
 */
void checkExpected(List<String> rs, String[][] expected, String msg, Logger LOG, boolean checkFileName) {
  LOG.warn(testName.getMethodName() + ": read data(" + msg + "): ");
  logResult(LOG, rs);
  Assert.assertEquals(testName.getMethodName() + ": " + msg + "; " + rs, expected.length, rs.size());
  // verify data and layout row by row
  for(int i = 0; i < expected.length; i++) {
    // expected[i][0] is a prefix of the row's data portion
    Assert.assertTrue("Actual line (data) " + i + " data: " + rs.get(i) + "; expected " + expected[i][0], rs.get(i).startsWith(expected[i][0]));
    if(checkFileName) {
      // expected[i][1] may be a literal suffix or a full-row regex
      Assert.assertTrue("Actual line(file) " + i + " file: " + rs.get(i), rs.get(i).endsWith(expected[i][1]) || rs.get(i).matches(expected[i][1]));
    }
  }
}

// NOTE(review): definition continues beyond this chunk — body not visible here.
void logResult(Logger LOG, List<String> rs) {
/**
 * Drops the base-class test tables plus every TableExtended table used by the
 * MM-table tests.
 */
protected void dropTables() throws Exception {
  super.dropTables();
  for(TestTxnCommandsForMmTable.TableExtended t : TestTxnCommandsForMmTable.TableExtended.values()) {
    runStatementOnDriver("drop table if exists " + t);
  }
}
// NOTE(review): the javadoc opened below continues beyond this chunk.
/**
/**
 * Convenience overload: forwards to the five-argument checkExpected with this
 * class's LOG and file-name verification enabled.
 */
private void checkExpected(List<String> rs, String[][] expected, String msg) {
  super.checkExpected(rs, expected, msg, LOG, true);
}

// NOTE(review): annotation for a test method whose body lies beyond this chunk.
@Test
/** Runs one Cleaner cycle synchronously via runCompactorThread. */
public static void runCleaner(HiveConf hiveConf) throws Exception {
  runCompactorThread(hiveConf, CompactorThreadType.CLEANER);
}

// NOTE(review): definition continues beyond this chunk — body not visible here.
public static void runInitiator(HiveConf hiveConf) throws Exception {
/**
 * Convenience overload: delegates to the five-argument checkExpected using
 * this class's LOG, with file-name verification always enabled.
 */
private void checkExpected(List<String> rs, String[][] expected, String msg) {
  final boolean verifyFileNames = true;
  super.checkExpected(rs, expected, msg, LOG, verifyFileNames);
}
/** Runs one compaction Initiator cycle synchronously against the given conf. */
public static void runInitiator(HiveConf hiveConf) throws Exception {
  runCompactorThread(hiveConf, CompactorThreadType.INITIATOR);
}

/** Compactor thread flavors that tests can drive synchronously. */
private enum CompactorThreadType {
  INITIATOR,
  WORKER,
  CLEANER
}