if (hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_METRICS_ENABLED)) {
  MetricsFactory.init(hiveConf);
}
...
hiveConf.set(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_BIND_HOST.varname, getServerHost());
...
} catch (Throwable t) {
  throw new Error("Unable to initialize HiveServer2", t);
...
if (HiveConf.getBoolVar(hiveConf, ConfVars.LLAP_HS2_ENABLE_COORDINATOR)) {
...
    + " Not configured for SSL connection");
...
builder.setKeyStorePassword(ShimLoader.getHadoopShims().getPassword(
    hiveConf, ConfVars.HIVE_SERVER2_WEBUI_SSL_KEYSTORE_PASSWORD.varname));
builder.setKeyStorePath(keyStorePath);
@Override
public void checkOutputSpecs(FileSystem fs, JobConf jc) throws IOException {
  //obtain delegation tokens for the job
  if (UserGroupInformation.getCurrentUser().hasKerberosCredentials()) {
    TableMapReduceUtil.initCredentials(jc);
  }

  String hbaseTableName = jc.get(HBaseSerDe.HBASE_TABLE_NAME);
  jc.set(TableOutputFormat.OUTPUT_TABLE, hbaseTableName);
  Job job = new Job(jc);
  JobContext jobContext = ShimLoader.getHadoopShims().newJobContext(job);

  try {
    checkOutputSpecs(jobContext);
  } catch (InterruptedException e) {
    throw new IOException(e);
  }
}
int maxDynamicPartitions = jobInfo.getMaxDynamicPartitions();
Path loadPath = new Path(jobInfo.getLocation());
FileSystem fs = loadPath.getFileSystem(context.getConfiguration());

String dynPathSpec = loadPath.toUri().getPath();
dynPathSpec = dynPathSpec.replaceAll("__HIVE_DEFAULT_PARTITION__", "*");

Path pathPattern = new Path(dynPathSpec);
FileStatus[] status = fs.globStatus(pathPattern, FileUtils.HIDDEN_FILES_PATH_FILTER);

// iterate over the partition directories discovered by the glob
for (FileStatus st : status) {
  LinkedHashMap<String, String> fullPartSpec = new LinkedHashMap<String, String>();
  if (!customDynamicLocationUsed) {
    Warehouse.makeSpecFromName(fullPartSpec, st.getPath(), null);
  } else {
    HCatFileUtil.getPartKeyValuesForCustomLocation(fullPartSpec, jobInfo, st.getPath().toString());
  }
  partitionsDiscoveredByPath.put(st.getPath().toString(), fullPartSpec);
  JobConf jobConf = (JobConf)context.getConfiguration();
  JobContext currContext = HCatMapRedUtil.createJobContext(
      context.getJobID(),
      InternalUtil.createReporter(HCatMapRedUtil.createTaskAttemptContext(jobConf,
          ShimLoader.getHadoopShims().getHCatShim().createTaskAttemptID())));
  HCatOutputFormat.configureOutputStorageHandler(currContext, jobInfo, fullPartSpec);
  contextDiscoveredByPath.put(st.getPath().toString(), currContext);
}
try {
  Path location = new Path(table.getSd().getLocation());
  FileSystem fs = location.getFileSystem(conf);
  HadoopShims.HdfsEncryptionShim shim = ShimLoader.getHadoopShims().createHdfsEncryptionShim(fs, conf);
  if (!shim.isPathEncrypted(location)) {
    HdfsUtils.HadoopFileStatus status = new HdfsUtils.HadoopFileStatus(conf, fs, location);
    FileStatus targetStatus = fs.getFileStatus(location);
    String targetGroup = targetStatus == null ? null : targetStatus.getGroup();
    FileUtils.moveToTrash(fs, location, conf, isAutopurge);
    fs.mkdirs(location);
    HdfsUtils.setFullFileStatus(conf, status, targetGroup, fs, location, false);
  } else {
    FileStatus[] statuses = fs.listStatus(location, FileUtils.HIDDEN_FILES_PATH_FILTER);
    if ((statuses != null) && (statuses.length > 0)) {
      boolean success = Hive.trashFiles(fs, statuses, conf, isAutopurge);
      if (!success) {
        throw new HiveException("Error in deleting the contents of " + location.toString());
  partPath = new Path(dynPartPath);
} else if (!dynamicPartitioningUsed
    && Boolean.parseBoolean((String)table.getProperty("EXTERNAL"))
...
  partPath = new Path(finalLocn);
} else {
  partPath = new Path(partLocnRoot);
  int i = 0;
  for (FieldSchema partKey : table.getPartitionKeys()) {
    if (i++ != 0) {
      fs.mkdirs(partPath); // Attempt to make the path in case it does not exist before we check
      applyGroupAndPerms(fs, partPath, perms, grpName, false);
    }
...
fs.mkdirs(partPath); // Attempt to make the path in case it does not exist before we check
if (!ShimLoader.getHadoopShims().getHCatShim().isFileInHDFS(fs, partPath)) {
  applyGroupAndPerms(fs, partPath, perms, grpName, true);
}
  runOptions.shouldModifyManagedTableLocation = false;
} else {
  String curWarehouseRoot = HiveConf.getVar(conf, HiveConf.ConfVars.METASTOREWAREHOUSE);
  if (arePathsEqual(conf, runOptions.oldWarehouseRoot, curWarehouseRoot)) {
    LOG.info("oldWarehouseRoot is the same as the current warehouse root {}."
...
    runOptions.shouldModifyManagedTableLocation = false;
  } else {
    Path oldWhRootPath = new Path(runOptions.oldWarehouseRoot);
    curWhRootPath = new Path(curWarehouseRoot);
    FileSystem oldWhRootFs = oldWhRootPath.getFileSystem(conf);
    FileSystem curWhRootFs = curWhRootPath.getFileSystem(conf);
    oldWhRootPath = oldWhRootFs.makeQualified(oldWhRootPath);
    curWhRootPath = curWhRootFs.makeQualified(curWhRootPath);
    if (!FileUtils.equalsFileSystem(oldWhRootFs, curWhRootFs)) {
      LOG.info("oldWarehouseRoot {} has a different FS than the current warehouse root {}."
...
      if (!isHdfs(oldWhRootFs)) {
        LOG.info("Warehouse is using non-HDFS FileSystem {}. Disabling shouldModifyManagedTableLocation",
            oldWhRootFs.getUri());
        runOptions.shouldModifyManagedTableLocation = false;
      } else {
        encryptionShim = ShimLoader.getHadoopShims().createHdfsEncryptionShim(oldWhRootFs, conf);
        if (!hasEquivalentEncryption(encryptionShim, oldWhRootPath, curWhRootPath)) {
          LOG.info("oldWarehouseRoot {} and current warehouse root {} have different encryption zones." +
...
HiveConf.setVar(oldWhConf, HiveConf.ConfVars.METASTOREWAREHOUSE, runOptions.oldWarehouseRoot);
oldWh = new Warehouse(oldWhConf);
private void writeThenReadByRecordReader(int intervalRecordCount, int writeCount, int splitNumber,
    long maxSplitSize, CompressionCodec codec) throws IOException, InterruptedException {
  Path testDir = new Path(System.getProperty("test.tmp.dir", ".") + "/mapred/testsmallfirstsplit");
  Path testFile = new Path(testDir, "test_rcfile");
  fs.delete(testFile, true);
  Configuration cloneConf = new Configuration(conf);
  RCFileOutputFormat.setColumnNumber(cloneConf, bytesArray.length);
...
  jonconf.set("mapred.input.dir", testDir.toString());
  JobContext context = new Job(jonconf);
  HiveConf.setLongVar(context.getConfiguration(), HiveConf.ConfVars.MAPREDMAXSPLITSIZE, maxSplitSize);
  List<InputSplit> splits = inputFormat.getSplits(context);
  int readCount = 0;
  for (int i = 0; i < splits.size(); i++) {
    TaskAttemptContext tac = ShimLoader.getHadoopShims().getHCatShim().createTaskAttemptContext(jonconf,
        new TaskAttemptID());
    RecordReader<LongWritable, BytesRefArrayWritable> rr = inputFormat.createRecordReader(splits.get(i), tac);
  stat = getFs(path).getFileStatus(path);
} catch (FileNotFoundException fnfe) {
...
  ugi = ShimLoader.getHadoopShims().getUGIForConf(conf);
} catch (LoginException le) {
  throw new IOException(le);
}
String user = ShimLoader.getHadoopShims().getShortUserName(ugi);
if (stat.getOwner().equals(user) &&
    stat.getPermission().getUserAction().implies(FsAction.WRITE)) {
  return true;
}
if (stat.getPermission().getGroupAction().implies(FsAction.WRITE)) {
  String[] groups = ugi.getGroupNames();
  if (ArrayUtils.contains(groups, stat.getGroup())) {
  trashEnabled = 0 < hiveConf.getFloat("fs.trash.interval", -1);
} catch (NumberFormatException ex) {
...
try {
  HadoopShims.HdfsEncryptionShim shim =
      ShimLoader.getHadoopShims().createHdfsEncryptionShim(FileSystem.get(hiveConf), hiveConf);
  if (shim.isPathEncrypted(pathToData)) {
    throw new MetaException("Unable to drop " + objectName + " because it is in an encryption zone" +
public HadoopShims.HdfsEncryptionShim getHdfsEncryptionShim() throws HiveException {
  if (hdfsEncryptionShim == null) {
    try {
      FileSystem fs = FileSystem.get(conf);
      if ("hdfs".equals(fs.getUri().getScheme())) {
        hdfsEncryptionShim = ShimLoader.getHadoopShims().createHdfsEncryptionShim(fs, conf);
      } else {
        LOG.info("Could not get hdfsEncryptionShim, it is only applicable to hdfs filesystem.");
      }
    } catch (Exception e) {
      throw new HiveException(e);
    }
  }
  return hdfsEncryptionShim;
}
    jobConf, HBaseStorageHandler.class, TableInputFormatBase.class);
if (HiveConf.getVar(jobConf, HiveConf.ConfVars.HIVE_HBASE_SNAPSHOT_NAME) != null) {
...
Set<String> merged = new LinkedHashSet<String>(jobConf.getStringCollection("tmpjars"));
...
jobConf.set("tmpjars", StringUtils.arrayToString(merged.toArray(new String[0])));

JobConf hbaseJobConf = new JobConf(getConf());
org.apache.hadoop.hbase.mapred.TableMapReduceUtil.initCredentials(hbaseJobConf);
ShimLoader.getHadoopShims().mergeCredentials(jobConf, hbaseJobConf);
} catch (Exception e) {
  throw new RuntimeException(e);
public HiveSessionImpl(SessionHandle sessionHandle, TProtocolVersion protocol, String username,
    String password, HiveConf serverConf, String ipAddress, final List<String> forwardedAddresses) {
  this.username = username;
  this.password = password;
  creationTime = System.currentTimeMillis();
  this.sessionHandle = sessionHandle != null ? sessionHandle : new SessionHandle(protocol);
  this.sessionConf = new HiveConf(serverConf);
  this.ipAddress = ipAddress;
  this.forwardedAddresses = forwardedAddresses;
  this.operationLock = serverConf.getBoolVar(
      ConfVars.HIVE_SERVER2_PARALLEL_OPS_IN_SESSION) ? null : new Semaphore(1);
  try {
    // In non-impersonation mode, map scheduler queue to current user
    // if fair scheduler is configured.
    if (! sessionConf.getBoolVar(ConfVars.HIVE_SERVER2_ENABLE_DOAS) &&
        sessionConf.getBoolVar(ConfVars.HIVE_SERVER2_MAP_FAIR_SCHEDULER_QUEUE)) {
      ShimLoader.getHadoopShims().refreshDefaultQueue(sessionConf, username);
    }
  } catch (IOException e) {
    LOG.warn("Error setting scheduler queue: " + e, e);
  }
  // Set an explicit session name to control the download directory name
  sessionConf.set(ConfVars.HIVESESSIONID.varname,
      this.sessionHandle.getHandleIdentifier().toString());
  // Use thrift transportable formatter
  sessionConf.set(SerDeUtils.LIST_SINK_OUTPUT_FORMATTER, ThriftFormatter.class.getName());
  sessionConf.setInt(SerDeUtils.LIST_SINK_OUTPUT_PROTOCOL, protocol.getValue());
}
/**
 * Merge the provided <code>Token</code> into the JobConf.
 *
 * @param jobConf
 *          JobConf to merge the token into
 * @param accumuloToken
 *          The Token
 * @throws IOException
 *           If the merging fails
 */
public void mergeTokenIntoJobConf(JobConf jobConf, Token<?> accumuloToken) throws IOException {
  JobConf accumuloJobConf = new JobConf(jobConf);
  accumuloJobConf.getCredentials().addToken(accumuloToken.getService(), accumuloToken);

  // Merge them together.
  ShimLoader.getHadoopShims().mergeCredentials(jobConf, accumuloJobConf);
}
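A minimal usage sketch of the helper above, not taken from the Hive source: it assumes it sits in the same class as mergeTokenIntoJobConf, that an Accumulo delegation token was obtained elsewhere, and that the method name addTokenToJob is hypothetical.

public void addTokenToJob(JobConf jobConf, Token<?> accumuloToken) throws IOException {
  // Delegate to the helper above; the token is copied into the job's credentials
  // through the Hadoop shims merge.
  mergeTokenIntoJobConf(jobConf, accumuloToken);
  // After the merge, the token is retrievable from the job's credentials by its service name.
  if (jobConf.getCredentials().getToken(accumuloToken.getService()) == null) {
    throw new IOException("Token was not merged into the JobConf credentials");
  }
}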
while (true) {
  LOG.info("Starting HiveServer2");
  HiveConf hiveConf = new HiveConf();
  maxAttempts = hiveConf.getLongVar(HiveConf.ConfVars.HIVE_SERVER2_MAX_START_ATTEMPTS);
  HiveServer2 server = null;
  try {
    server = new HiveServer2();
    server.init(hiveConf);
    server.start();
    ShimLoader.getHadoopShims().startPauseMonitor(hiveConf);
    if (hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_SUPPORT_DYNAMIC_SERVICE_DISCOVERY)) {
      server.addServerInstanceToZooKeeper(hiveConf);
int requestHeaderSize =
    hiveConf.getIntVar(ConfVars.HIVE_SERVER2_THRIFT_HTTP_REQUEST_HEADER_SIZE);
int responseHeaderSize =
    hiveConf.getIntVar(ConfVars.HIVE_SERVER2_THRIFT_HTTP_RESPONSE_HEADER_SIZE);
conf.setRequestHeaderSize(requestHeaderSize);
conf.setResponseHeaderSize(responseHeaderSize);

boolean useSsl = hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_USE_SSL);
String schemeName = useSsl ? "https" : "http";

String keyStorePath = hiveConf.getVar(ConfVars.HIVE_SERVER2_SSL_KEYSTORE_PATH).trim();
String keyStorePassword = ShimLoader.getHadoopShims().getPassword(hiveConf,
    HiveConf.ConfVars.HIVE_SERVER2_SSL_KEYSTORE_PASSWORD.varname);
if (keyStorePath.isEmpty()) {
private static Connection getConnection() throws Exception {
  HiveConf conf = new HiveConf();
  String jdbcDriver = HiveConf.getVar(conf, HiveConf.ConfVars.METASTORE_CONNECTION_DRIVER);
  Driver driver = (Driver) Class.forName(jdbcDriver).newInstance();
  Properties prop = new Properties();
  String driverUrl = HiveConf.getVar(conf, HiveConf.ConfVars.METASTORECONNECTURLKEY);
  String user = HiveConf.getVar(conf, HiveConf.ConfVars.METASTORE_CONNECTION_USER_NAME);
  String passwd = ShimLoader.getHadoopShims().getPassword(conf, HiveConf.ConfVars.METASTOREPWD.varname);
  prop.setProperty("user", user);
  prop.setProperty("password", passwd);
  return driver.connect(driverUrl, prop);
}
public HadoopShims.HdfsEncryptionShim getHdfsEncryptionShim(FileSystem fs) throws HiveException {
  if (!hdfsEncryptionShims.containsKey(fs.getUri())) {
    try {
      if ("hdfs".equals(fs.getUri().getScheme())) {
        hdfsEncryptionShims.put(fs.getUri(),
            ShimLoader.getHadoopShims().createHdfsEncryptionShim(fs, sessionConf));
      } else {
        LOG.info("Could not get hdfsEncryptionShim, it is only applicable to hdfs filesystem.");
      }
    } catch (Exception e) {
      throw new HiveException(e);
    }
  }
  return hdfsEncryptionShims.get(fs.getUri());
}
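A hedged usage sketch of the cached getter above, not from the Hive source: a caller asks for the shim for a path's filesystem and treats a null result (non-HDFS scheme) as "not encrypted". The method name isEncrypted is hypothetical; isPathEncrypted is the same shim call used in the truncate and drop snippets earlier in these results.

public boolean isEncrypted(Path path, Configuration conf) throws HiveException, IOException {
  FileSystem fs = path.getFileSystem(conf);
  HadoopShims.HdfsEncryptionShim shim = getHdfsEncryptionShim(fs);
  // Non-HDFS filesystems never get a shim cached above, so the getter returns null for them.
  return shim != null && shim.isPathEncrypted(path);
}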
String hbaseTableName = jobConf.get(HBaseSerDe.HBASE_TABLE_NAME);
...
initializeTable(conn, tableName);
String hbaseColumnsMapping = jobConf.get(HBaseSerDe.HBASE_COLUMNS_MAPPING);
boolean doColumnRegexMatching = jobConf.getBoolean(HBaseSerDe.HBASE_COLUMNS_REGEX_MATCHING, true);
...
JobContext jobContext = ShimLoader.getHadoopShims().newJobContext(job);
Path[] tablePaths = FileInputFormat.getInputPaths(jobContext);
@Override
public void configureJobConf(TableDesc tableDesc, JobConf jobConf) {
  try {
    TableMapReduceUtil.addDependencyJars(jobConf);
    org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addDependencyJars(jobConf,
        PhoenixStorageHandler.class);
    JobConf hbaseJobConf = new JobConf(getConf());
    org.apache.hadoop.hbase.mapred.TableMapReduceUtil.initCredentials(hbaseJobConf);
    ShimLoader.getHadoopShims().mergeCredentials(jobConf, hbaseJobConf);
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
}
private void setupFileSystem(HadoopShims shims) throws IOException {
  if (fsType == FsType.local) {
    fs = FileSystem.getLocal(conf);
  } else if (fsType == FsType.hdfs || fsType == FsType.encrypted_hdfs) {
    int numDataNodes = 4;

    if (fsType == FsType.encrypted_hdfs) {
      // Set the security key provider so that the MiniDFS cluster is initialized
      // with encryption
      conf.set(SECURITY_KEY_PROVIDER_URI_NAME, getKeyProviderURI());
      conf.setInt("fs.trash.interval", 50);

      dfs = shims.getMiniDfs(conf, numDataNodes, true, null);
      fs = dfs.getFileSystem();

      // set up the java key provider for encrypted hdfs cluster
      hes = shims.createHdfsEncryptionShim(fs, conf);

      LOG.info("key provider is initialized");
    } else {
      dfs = shims.getMiniDfs(conf, numDataNodes, true, null);
      fs = dfs.getFileSystem();
    }
  } else {
    throw new IllegalArgumentException("Unknown or unhandled fsType [" + fsType + "]");
  }
}
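A follow-up sketch under stated assumptions, not from the Hive test suite: once setupFileSystem has created hes for the encrypted_hdfs case, a test could place a scratch directory inside an encryption zone. The createEncryptionZone call and the key name "test_key" are assumptions about the shim interface and the key provider configured above.

private void createTestEncryptionZone(Path zoneDir) throws IOException {
  // The zone root must exist (and be empty) before it can become an encryption zone.
  fs.mkdirs(zoneDir);
  // "test_key" is assumed to have already been created in the configured key provider.
  hes.createEncryptionZone(zoneDir, "test_key");
}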