@Override
public void destroy() {
  coreDriver.destroy();
}
public static void clean(HiveConf conf) {
  Driver drv = mapDrivers.get(conf);
  if (drv != null) {
    drv.destroy();
  }
  mapDrivers.remove(conf);
}
@Override
public void close() throws IOException {
  try {
    LOG.info("DriverCleanup for LLAP splits: {}", applicationId);
    driver.releaseLocksAndCommitOrRollback(true);
    driver.close();
    driver.destroy();
    txnManager.closeTxnManager();
  } catch (Exception err) {
    LOG.error("Error closing driver resources", err);
    throw new IOException(err);
  }
}
@AfterClass
public static void cleanUp() throws Exception {
  driver.close();
  driver.destroy();
}
@After
public void tearDown() throws Exception {
  if (d != null) {
    d.close();
    d.destroy();
    d = null;
  }
}
@Override
public void close() {
  lDrvState.stateLock.lock();
  try {
    releaseDriverContext();
    if (lDrvState.driverState == DriverState.COMPILING
        || lDrvState.driverState == DriverState.EXECUTING) {
      lDrvState.abort();
    }
    releasePlan();
    releaseCachedResult();
    releaseFetchTask();
    releaseResStream();
    releaseContext();
    lDrvState.driverState = DriverState.CLOSED;
  } finally {
    lDrvState.stateLock.unlock();
    LockedDriverState.removeLockedDriverState();
  }
  destroy();
}
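Across these call sites the pattern is consistent: a Driver is built from a HiveConf, used to run statements, then closed and destroyed exactly once, usually in a finally block or a test teardown. A minimal sketch of that lifecycle, assuming an active SessionState; the class name and query here are illustrative, not taken from the snippets above:

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.Driver;
import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
import org.apache.hadoop.hive.ql.session.SessionState;

public class DriverLifecycleSketch {
  public static void main(String[] args) throws Exception {
    HiveConf conf = new HiveConf();
    SessionState.start(conf);                // Driver requires an active session
    Driver driver = new Driver(conf);
    try {
      CommandProcessorResponse response = driver.run("show tables");
      if (response.getResponseCode() != 0) { // non-zero response code signals failure
        throw new IllegalStateException("query failed: " + response.getErrorMessage());
      }
    } finally {
      driver.close();    // release the plan, fetch task, and other per-query state
      driver.destroy();  // release locks; the driver must not be reused afterwards
    }
  }
}

The close-then-destroy ordering matches both the teardown snippets above and Driver.close() itself, which ends by calling destroy().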
"WHEN MATCHED THEN UPDATE SET b = 7 " + "WHEN NOT MATCHED THEN INSERT VALUES(s.a, s.b) "; d.destroy(); HiveConf hc = new HiveConf(hiveConf); hc.setVar(HiveConf.ConfVars.HIVE_EXECUTION_ENGINE, "tez");
@After
public void tearDown() throws Exception {
  try {
    if (d != null) {
      dropTables();
      d.close();
      d.destroy();
      d = null;
    }
  } finally {
    TxnDbUtil.cleanDb(hiveConf);
    FileUtils.deleteDirectory(new File(getTestDataDir()));
  }
}
public static Task<?> createRootTask(HiveConf builderConf, Set<ReadEntity> inputs, Set<WriteEntity> outputs,
    StringBuilder command, LinkedHashMap<String, String> partSpec, String indexTableName, String dbName) {
  // Don't try to index optimize the query to build the index
  HiveConf.setBoolVar(builderConf, HiveConf.ConfVars.HIVEOPTINDEXFILTER, false);
  Driver driver = new Driver(builderConf, SessionState.get().getUserName());
  driver.compile(command.toString(), false);
  Task<?> rootTask = driver.getPlan().getRootTasks().get(0);
  inputs.addAll(driver.getPlan().getInputs());
  outputs.addAll(driver.getPlan().getOutputs());
  IndexMetadataChangeWork indexMetaChange = new IndexMetadataChangeWork(partSpec, indexTableName, dbName);
  IndexMetadataChangeTask indexMetaChangeTsk = (IndexMetadataChangeTask) TaskFactory.get(indexMetaChange, builderConf);
  indexMetaChangeTsk.setWork(indexMetaChange);
  rootTask.addDependentTask(indexMetaChangeTsk);
  driver.destroy();
  return rootTask;
}
@After
public void tearDown() throws Exception {
  try {
    if (d != null) {
      dropTables();
      d.close();
      d.destroy();
      d = null;
    }
    TxnDbUtil.cleanDb(hiveConf);
  } finally {
    FileUtils.deleteDirectory(new File(TEST_DATA_DIR));
  }
}
driver.destroy();
@Test
public void testDynamicPartitionsMerge() throws Exception {
  d.destroy();
  hiveConf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
  runStatementOnDriver("insert into " + Table.ACIDTBLPART + " partition(p) values(1,1,'p1'),(2,2,'p1'),(3,3,'p1'),(4,4,'p2')");
  List<String> r1 = runStatementOnDriver("select count(*) from " + Table.ACIDTBLPART);
  Assert.assertEquals("4", r1.get(0));
  int[][] sourceVals = {{2,15},{4,44},{5,5},{11,11}};
  runStatementOnDriver("insert into " + Table.NONACIDORCTBL + " " + makeValuesClause(sourceVals));
  runStatementOnDriver("merge into " + Table.ACIDTBLPART + " using " + Table.NONACIDORCTBL +
      " as s ON " + Table.ACIDTBLPART + ".a = s.a " +
      "when matched then update set b = s.b " +
      "when not matched then insert values(s.a, s.b, 'new part')");
  r1 = runStatementOnDriver("select p,a,b from " + Table.ACIDTBLPART + " order by p, a, b");
  String result = r1.toString();
  Assert.assertEquals("[new part\t5\t5, new part\t11\t11, p1\t1\t1, p1\t2\t15, p1\t3\t3, p2\t4\t44]", result);
  // note: inserts go into 'new part'... so this won't fail
  assertUniqueID(Table.ACIDTBLPART);
}
@Test
public void testMultiInsert() throws Exception {
  runStatementOnDriver("create temporary table if not exists data1 (x int)");
  runStatementOnDriver("insert into data1 values (1),(2),(1)");
  d.destroy();
  hiveConf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
  d = new Driver(hiveConf);
  runStatementOnDriver(" from data1 " +
      "insert into " + Table.ACIDTBLPART + " partition(p) select 0, 0, 'p' || x " +
      "insert into " + Table.ACIDTBLPART + " partition(p='p1') select 0, 1");
  /**
   * Using {@link BucketCodec.V0} the output is missing 1 of the (p1,0,1) rows because they have
   * the same ROW__ID and only differ by StatementId, so
   * {@link org.apache.hadoop.hive.ql.io.orc.OrcRawRecordMerger} skips one. With split update
   * (and V0), the data is read correctly (insert deltas are now the base) but we should still
   * get duplicate ROW__IDs.
   */
  List<String> r = runStatementOnDriver("select p,a,b from " + Table.ACIDTBLPART + " order by p, a, b");
  Assert.assertEquals("[p1\t0\t0, p1\t0\t0, p1\t0\t1, p1\t0\t1, p1\t0\t1, p2\t0\t0]", r.toString());
  assertUniqueID(Table.ACIDTBLPART);
  // this delete + select covers VectorizedOrcAcidRowBatchReader
  runStatementOnDriver("delete from " + Table.ACIDTBLPART);
  r = runStatementOnDriver("select p,a,b from " + Table.ACIDTBLPART + " order by p, a, b");
  Assert.assertEquals("[]", r.toString());
}
@Ignore
public void testDynamicPartitions() throws Exception {
  d.destroy();
  hiveConf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
/**
 * Using nested partitions and thus DummyPartition
 * @throws Exception
 */
@Test
public void testDynamicPartitionsMerge2() throws Exception {
  d.destroy();
  hiveConf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
  int[][] targetVals = {{1,1,1},{2,2,2},{3,3,1},{4,4,2}};
  runStatementOnDriver("insert into " + Table.ACIDNESTEDPART + " partition(p=1,q) " + makeValuesClause(targetVals));
  List<String> r1 = runStatementOnDriver("select count(*) from " + Table.ACIDNESTEDPART);
  Assert.assertEquals("4", r1.get(0));
  int[][] sourceVals = {{2,15},{4,44},{5,5},{11,11}};
  runStatementOnDriver("insert into " + Table.NONACIDORCTBL + " " + makeValuesClause(sourceVals));
  runStatementOnDriver("merge into " + Table.ACIDNESTEDPART + " using " + Table.NONACIDORCTBL +
      " as s ON " + Table.ACIDNESTEDPART + ".a = s.a " +
      "when matched then update set b = s.b " +
      "when not matched then insert values(s.a, s.b, 3,4)");
  r1 = runStatementOnDriver("select p,q,a,b from " + Table.ACIDNESTEDPART + " order by p,q, a, b");
  Assert.assertEquals(stringifyValues(new int[][] {{1,1,1,1},{1,1,3,3},{1,2,2,15},{1,2,4,44},{3,4,5,5},{3,4,11,11}}), r1);
  // insert of merge lands in part (3,4) - no updates land there
  assertUniqueID(Table.ACIDNESTEDPART);
}
private void runSparkTestSession(HiveConf conf, int threadId) throws Exception {
  conf.setVar(HiveConf.ConfVars.SPARK_SESSION_TIMEOUT, "10s");
  conf.setVar(HiveConf.ConfVars.SPARK_SESSION_TIMEOUT_PERIOD, "1s");
  Driver driver = null;
  try {
    driver = new Driver(new QueryState.Builder()
        .withGenerateNewQueryId(true)
        .withHiveConf(conf).build(), null, null);
    SparkSession sparkSession = SparkUtilities.getSparkSession(conf, SparkSessionManagerImpl.getInstance());
    Assert.assertEquals(0, driver.run("show tables").getResponseCode());
    barrier.await();
    SparkContext sparkContext = getSparkContext(sparkSession);
    Assert.assertFalse(sparkContext.isStopped());
    if (threadId == 1) {
      barrier.await();
      closeSparkSession(sparkSession);
      Assert.assertTrue(sparkContext.isStopped());
    } else {
      closeSparkSession(sparkSession);
      Assert.assertFalse(sparkContext.isStopped());
      barrier.await();
    }
  } finally {
    if (driver != null) {
      driver.destroy();
    }
  }
}
@Test
public void testMoreBucketsThanReducers() throws Exception {
  // see bucket_num_reducers.q bucket_num_reducers2.q
  // todo: try using set VerifyNumReducersHook.num.reducers=10;
  d.destroy();
  HiveConf hc = new HiveConf(hiveConf);
  hc.setIntVar(HiveConf.ConfVars.MAXREDUCERS, 1);
  // this is used in multiple places, SemanticAnalyzer.getBucketingSortingDest() among others
  hc.setIntVar(HiveConf.ConfVars.HADOOPNUMREDUCERS, 1);
  hc.setBoolVar(HiveConf.ConfVars.HIVE_EXPLAIN_USER, false);
  d = new Driver(hc);
  d.setMaxRows(10000);
  runStatementOnDriver("insert into " + Table.ACIDTBL + " values(1,1)"); // txn X writes to bucket1
  runStatementOnDriver("insert into " + Table.ACIDTBL + " values(0,0),(3,3)"); // txn X + 1 writes to bucket0 + bucket1
  runStatementOnDriver("update " + Table.ACIDTBL + " set b = -1");
  List<String> r = runStatementOnDriver("select * from " + Table.ACIDTBL + " order by a, b");
  int[][] expected = {{0, -1}, {1, -1}, {3, -1}};
  Assert.assertEquals(stringifyValues(expected), r);
}
@Test
public void testBucketCodec() throws Exception {
  d.destroy();
  // insert data in "legacy" format
  hiveConf.setIntVar(HiveConf.ConfVars.TESTMODE_BUCKET_CODEC_VERSION, 0);
  d = new Driver(hiveConf);
  int[][] targetVals = {{2,1},{4,3},{5,6},{7,8}};
  runStatementOnDriver("insert into " + Table.ACIDTBL + " " + makeValuesClause(targetVals));
  d.destroy();
  hiveConf.setIntVar(HiveConf.ConfVars.TESTMODE_BUCKET_CODEC_VERSION, 1);
  d = new Driver(hiveConf);
  // do some operations with new format
  runStatementOnDriver("update " + Table.ACIDTBL + " set b=11 where a in (5,7)");
  runStatementOnDriver("insert into " + Table.ACIDTBL + " values(11,11)");
  runStatementOnDriver("delete from " + Table.ACIDTBL + " where a = 7");
  // make sure we get the right data back before/after compactions
  List<String> r = runStatementOnDriver("select a,b from " + Table.ACIDTBL + " order by a,b");
  int[][] rExpected = {{2,1},{4,3},{5,11},{11,11}};
  Assert.assertEquals(stringifyValues(rExpected), r);
  runStatementOnDriver("ALTER TABLE " + Table.ACIDTBL + " COMPACT 'MINOR'");
  runWorker(hiveConf);
  r = runStatementOnDriver("select a,b from " + Table.ACIDTBL + " order by a,b");
  Assert.assertEquals(stringifyValues(rExpected), r);
  runStatementOnDriver("ALTER TABLE " + Table.ACIDTBL + " COMPACT 'MAJOR'");
  runWorker(hiveConf);
  r = runStatementOnDriver("select a,b from " + Table.ACIDTBL + " order by a,b");
  Assert.assertEquals(stringifyValues(rExpected), r);
}
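Several of these tests share a restart idiom: Driver snapshots its configuration at construction, so to change a setting mid-test the code destroys the current driver, adjusts (or copies) the HiveConf, and constructs a fresh Driver. A small helper capturing that idiom might look like the sketch below; restartDriver is a hypothetical name, not a Hive API:

// Hypothetical test helper, not part of Hive: rebuild the Driver so that
// conf changes (e.g. TESTMODE_BUCKET_CODEC_VERSION above) take effect.
private Driver restartDriver(Driver current, HiveConf conf) {
  if (current != null) {
    current.destroy(); // release any locks and state held by the old driver
  }
  return new Driver(conf); // the new driver picks up the updated settings
}

With such a helper, each reconfiguration step in testBucketCodec would reduce to a setIntVar call followed by d = restartDriver(d, hiveConf).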
@Test
public void testMergeUpdateDeleteNoCardCheck() throws Exception {
  d.destroy();
  HiveConf hc = new HiveConf(hiveConf);
  hc.setBoolVar(HiveConf.ConfVars.MERGE_CARDINALITY_VIOLATION_CHECK, false);
  d = new Driver(hc);
  d.setMaxRows(10000);
  int[][] baseValsOdd = {{2,2},{4,44},{5,5},{11,11}};
  runStatementOnDriver("insert into " + Table.NONACIDORCTBL + " " + makeValuesClause(baseValsOdd));
  int[][] vals = {{2,1},{4,3},{5,6},{7,8}};
  runStatementOnDriver("insert into " + Table.ACIDTBL + " " + makeValuesClause(vals));
  String query = "merge into " + Table.ACIDTBL + " as t using " + Table.NONACIDORCTBL + " s ON t.a = s.a " +
      "WHEN MATCHED AND s.a < 3 THEN update set b = 0 " +
      "WHEN MATCHED and t.a > 3 and t.a < 5 THEN DELETE ";
  runStatementOnDriver(query);
  List<String> r = runStatementOnDriver("select a,b from " + Table.ACIDTBL + " order by a,b");
  int[][] rExpected = {{2,0},{5,6},{7,8}};
  Assert.assertEquals(stringifyValues(rExpected), r);
}
@Ignore("Moved to Tez") @Test public void testMoreBucketsThanReducers2() throws Exception { //todo: try using set VerifyNumReducersHook.num.reducers=10; //see bucket_num_reducers.q bucket_num_reducers2.q d.destroy(); HiveConf hc = new HiveConf(hiveConf); hc.setIntVar(HiveConf.ConfVars.MAXREDUCERS, 2); //this is used in multiple places, SemanticAnalyzer.getBucketingSortingDest() among others hc.setIntVar(HiveConf.ConfVars.HADOOPNUMREDUCERS, 2); d = new Driver(hc); d.setMaxRows(10000); runStatementOnDriver("create table fourbuckets (a int, b int) clustered by (a) into 4 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); //below value for a is bucket id, for b - txn id (logically) runStatementOnDriver("insert into fourbuckets values(0,1),(1,1)");//txn X write to b0 + b1 runStatementOnDriver("insert into fourbuckets values(2,2),(3,2)");// txn X + 1 write to b2 + b3 runStatementOnDriver("insert into fourbuckets values(0,3),(1,3)");//txn X + 2 write to b0 + b1 runStatementOnDriver("insert into fourbuckets values(2,4),(3,4)");//txn X + 3 write to b2 + b3 //so with 2 FileSinks and 4 buckets, FS1 should see (0,1),(2,2),(0,3)(2,4) since data is sorted by ROW__ID where tnxid is the first component //FS2 should see (1,1),(3,2),(1,3),(3,4) runStatementOnDriver("update fourbuckets set b = -1"); List<String> r = runStatementOnDriver("select * from fourbuckets order by a, b"); int[][] expected = {{0, -1},{0, -1}, {1, -1}, {1, -1}, {2, -1}, {2, -1}, {3, -1}, {3, -1}}; Assert.assertEquals(stringifyValues(expected), r); } @Test