public static HiveConf storePropertiesToHiveConf(Properties properties, HiveConf hiveConf) throws IOException {
  for (Map.Entry<Object, Object> prop : properties.entrySet()) {
    if (prop.getValue() instanceof String) {
      hiveConf.set((String) prop.getKey(), (String) prop.getValue());
    } else if (prop.getValue() instanceof Integer) {
      hiveConf.setInt((String) prop.getKey(), (Integer) prop.getValue());
    } else if (prop.getValue() instanceof Boolean) {
      hiveConf.setBoolean((String) prop.getKey(), (Boolean) prop.getValue());
    } else if (prop.getValue() instanceof Long) {
      hiveConf.setLong((String) prop.getKey(), (Long) prop.getValue());
    } else if (prop.getValue() instanceof Float) {
      hiveConf.setFloat((String) prop.getKey(), (Float) prop.getValue());
    } else {
      LOG.warn("Unsupported type: key=" + prop.getKey() + " value=" + prop.getValue());
    }
  }
  return hiveConf;
}
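A minimal usage sketch for the helper above; the property keys and values are illustrative assumptions, not taken from the source. String values go through set(), while boxed Integer/Boolean values are routed to the matching typed setter.

// Sketch (assumed keys/values): copy typed Properties entries into a HiveConf.
Properties props = new Properties();
props.setProperty("hive.exec.scratchdir", "/tmp/hive-scratch"); // String -> set()
props.put("hive.server2.thrift.port", 10000);                   // Integer -> setInt()
props.put("hive.exec.parallel", Boolean.TRUE);                  // Boolean -> setBoolean()
HiveConf conf = storePropertiesToHiveConf(props, new HiveConf());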
public HiveSessionImpl(SessionHandle sessionHandle, TProtocolVersion protocol, String username,
    String password, HiveConf serverConf, String ipAddress, final List<String> forwardedAddresses) {
  this.username = username;
  this.password = password;
  creationTime = System.currentTimeMillis();
  this.sessionHandle = sessionHandle != null ? sessionHandle : new SessionHandle(protocol);
  this.sessionConf = new HiveConf(serverConf);
  this.ipAddress = ipAddress;
  this.forwardedAddresses = forwardedAddresses;
  this.operationLock = serverConf.getBoolVar(
      ConfVars.HIVE_SERVER2_PARALLEL_OPS_IN_SESSION) ? null : new Semaphore(1);
  try {
    // In non-impersonation mode, map scheduler queue to current user
    // if fair scheduler is configured.
    if (!sessionConf.getBoolVar(ConfVars.HIVE_SERVER2_ENABLE_DOAS)
        && sessionConf.getBoolVar(ConfVars.HIVE_SERVER2_MAP_FAIR_SCHEDULER_QUEUE)) {
      ShimLoader.getHadoopShims().refreshDefaultQueue(sessionConf, username);
    }
  } catch (IOException e) {
    LOG.warn("Error setting scheduler queue: " + e, e);
  }
  // Set an explicit session name to control the download directory name
  sessionConf.set(ConfVars.HIVESESSIONID.varname,
      this.sessionHandle.getHandleIdentifier().toString());
  // Use thrift transportable formatter
  sessionConf.set(SerDeUtils.LIST_SINK_OUTPUT_FORMATTER, ThriftFormatter.class.getName());
  sessionConf.setInt(SerDeUtils.LIST_SINK_OUTPUT_PROTOCOL, protocol.getValue());
}
@BeforeClass
public static void setUpOneTime() throws Exception {
  fs = new LocalFileSystem();
  fs.initialize(fs.getWorkingDirectory().toUri(), new Configuration());
  HiveConf hiveConf = new HiveConf();
  hiveConf.setInt(HCatConstants.HCAT_HIVE_CLIENT_EXPIRY_TIME, 0);
  // Hack: initialize the cache with a 0 expiry time so it returns a new hive client every time.
  // Otherwise the cache doesn't play well with the second test method, because the client gets
  // closed in the tearDown() of the previous test.
  HCatUtil.getHiveMetastoreClient(hiveConf);
  MapCreate.writeCount = 0;
  MapRead.readCount = 0;
}
@Test(timeout=80000)
public void testQueueingWithThreads() throws Exception {
  final int PART_COUNT = 12;
  hiveConf.setInt(MetastoreConf.ConfVars.BATCH_RETRIEVE_MAX.getVarname(), 5);
  hiveConf.setInt(MetastoreConf.ConfVars.STATS_AUTO_UPDATE_WORKER_COUNT.getVarname(), 2);
  StatsUpdaterThread su = createUpdater();
  su.startWorkers();
  IMetaStoreClient msClient = new HiveMetaStoreClient(hiveConf);
  hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER, false);
  hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSCOLAUTOGATHER, false);
  executeQuery("create table simple_stats (s string) partitioned by (i int)");
  for (int i = 0; i < PART_COUNT; ++i) {
    executeQuery("insert into simple_stats partition(i='" + i + "') values ('test')");
  }
  verifyPartStatsUpToDate(PART_COUNT, 0, msClient, "simple_stats", false);
  // Set one of the partitions to be skipped, so that a command is created for every other one.
  setPartitionSkipProperty(msClient, "simple_stats", "i=0", "true");
  assertTrue(su.runOneIteration());
  su.waitForQueuedCommands();
  verifyStatsUpToDate("simple_stats", "i=0", Lists.newArrayList("s"), msClient, false);
  verifyPartStatsUpToDate(PART_COUNT, 1, msClient, "simple_stats", true);
  assertFalse(su.runOneIteration());
  drainWorkQueue(su, 0); // Nothing else is updated after the first update.
  msClient.close();
}
private void setupFileSystem(HadoopShims shims) throws IOException {
  if (fsType == FsType.local) {
    fs = FileSystem.getLocal(conf);
  } else if (fsType == FsType.hdfs || fsType == FsType.encrypted_hdfs) {
    int numDataNodes = 4;
    if (fsType == FsType.encrypted_hdfs) {
      // Set the security key provider so that the MiniDFS cluster is initialized
      // with encryption
      conf.set(SECURITY_KEY_PROVIDER_URI_NAME, getKeyProviderURI());
      conf.setInt("fs.trash.interval", 50);
      dfs = shims.getMiniDfs(conf, numDataNodes, true, null);
      fs = dfs.getFileSystem();
      // set up the java key provider for encrypted hdfs cluster
      hes = shims.createHdfsEncryptionShim(fs, conf);
      LOG.info("key provider is initialized");
    } else {
      dfs = shims.getMiniDfs(conf, numDataNodes, true, null);
      fs = dfs.getFileSystem();
    }
  } else {
    throw new IllegalArgumentException("Unknown or unhandled fsType [" + fsType + "]");
  }
}
@Test(timeout=40000)
public void testParallelOps() throws Exception {
  hiveConf.setInt(MetastoreConf.ConfVars.STATS_AUTO_UPDATE_WORKER_COUNT.getVarname(), 4);
  StatsUpdaterThread su = createUpdater();
  IMetaStoreClient msClient = new HiveMetaStoreClient(hiveConf);
@VisibleForTesting
private HiveConf getDefaultConf(final ConnectorContext connectorContext) {
  final HiveConf result = new HiveConf();
  result.setBoolean(HiveConfigConstants.USE_METASTORE_LOCAL, true);
  final int dataStoreTimeout = getDataStoreTimeout(connectorContext);
  result.setInt(HiveConfigConstants.JAVAX_JDO_DATASTORETIMEOUT, dataStoreTimeout);
  result.setInt(HiveConfigConstants.JAVAX_JDO_DATASTOREREADTIMEOUT, dataStoreTimeout);
  result.setInt(HiveConfigConstants.JAVAX_JDO_DATASTOREWRITETIMEOUT, getDataStoreWriteTimeout(connectorContext));
  result.setInt(HiveConfigConstants.HIVE_METASTORE_DS_RETRY, 0);
  result.setInt(HiveConfigConstants.HIVE_HMSHANDLER_RETRY, 0);
  result.set(
      HiveConfigConstants.JAVAX_JDO_PERSISTENCEMANAGER_FACTORY_CLASS,
      HiveConfigConstants.JAVAX_JDO_PERSISTENCEMANAGER_FACTORY
  );
  result.setBoolean(HiveConfigConstants.HIVE_STATS_AUTOGATHER, false);
  return result;
}
public BeejuJUnitRule(String databaseName, Map<String, String> configuration) {
  checkNotNull(databaseName, "databaseName is required");
  this.databaseName = databaseName;
  if (configuration != null && !configuration.isEmpty()) {
    for (Entry<String, String> entry : configuration.entrySet()) {
      conf.set(entry.getKey(), entry.getValue());
    }
  }
  driverClassName = EmbeddedDriver.class.getName();
  conf.setBoolean("hcatalog.hive.client.cache.disabled", true);
  connectionURL = "jdbc:derby:memory:" + UUID.randomUUID() + ";create=true";
  conf.setVar(ConfVars.METASTORECONNECTURLKEY, connectionURL);
  conf.setVar(ConfVars.METASTORE_CONNECTION_DRIVER, driverClassName);
  conf.setVar(ConfVars.METASTORE_CONNECTION_USER_NAME, METASTORE_DB_USER);
  conf.setVar(ConfVars.METASTOREPWD, METASTORE_DB_PASSWORD);
  conf.setBoolVar(ConfVars.HMSHANDLERFORCERELOADCONF, true);
  // Hive 2.x compatibility
  conf.setBoolean("datanucleus.schema.autoCreateAll", true);
  conf.setBoolean("hive.metastore.schema.verification", false);
  // override default port as some of our test environments claim it is in use.
  conf.setInt("hive.server2.webui.port", 0); // ConfVars.HIVE_SERVER2_WEBUI_PORT
  try {
    // overriding default derby log path to go to tmp
    String derbyLog = File.createTempFile("derby", ".log").getCanonicalPath();
    System.setProperty("derby.stream.error.file", derbyLog);
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
}
private void createHiveConf() {
  config = new HiveConf();
  config.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, tempFolderPath);
  config.set(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_BIND_HOST.varname, getHostName());
  config.setInt(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_PORT.varname, getPort());
  config.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, getMetastoreConnectUrl());
  // setting port to -1 to turn the webui off
  config.setInt(HiveConf.ConfVars.HIVE_SERVER2_WEBUI_PORT.varname, -1);
  for (Map.Entry<String, String> authConfig : authenticationConfiguration.getAuthenticationConfig().entrySet()) {
    config.set(authConfig.getKey(), authConfig.getValue());
  }
}
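A hedged sketch of how a HiveConf assembled as above could be handed to an embedded HiveServer2 in a test harness; the surrounding wiring is an assumption, only HiveServer2.init(HiveConf) and start()/stop() are standard API.

// Sketch (assumed wiring, not from the source): start an embedded HiveServer2
// using the config field populated by createHiveConf() above.
createHiveConf();
HiveServer2 hiveServer2 = new HiveServer2();
hiveServer2.init(config);   // picks up the thrift bind host/port and webui settings
hiveServer2.start();
// ... run tests against the embedded server, then shut it down ...
hiveServer2.stop();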
conf.setInt("mapred.task.partition", 1); conf.set("mapred.task.id", taskAttemptIdStr);
public HiveSessionImpl(TProtocolVersion protocol, String username, String password,
    HiveConf serverhiveConf, String ipAddress) {
  this.username = username;
  this.password = password;
  this.sessionHandle = new SessionHandle(protocol);
  this.hiveConf = new HiveConf(serverhiveConf);
  this.ipAddress = ipAddress;
  try {
    // In non-impersonation mode, map scheduler queue to current user
    // if fair scheduler is configured.
    if (!hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_ENABLE_DOAS)
        && hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_MAP_FAIR_SCHEDULER_QUEUE)) {
      ShimLoader.getHadoopShims().refreshDefaultQueue(hiveConf, username);
    }
  } catch (IOException e) {
    LOG.warn("Error setting scheduler queue: " + e, e);
  }
  // Set an explicit session name to control the download directory name
  hiveConf.set(ConfVars.HIVESESSIONID.varname, sessionHandle.getHandleIdentifier().toString());
  // Use thrift transportable formatter
  hiveConf.set(ListSinkOperator.OUTPUT_FORMATTER, FetchFormatter.ThriftFormatter.class.getName());
  hiveConf.setInt(ListSinkOperator.OUTPUT_PROTOCOL, protocol.getValue());
}