private IMetaStoreClient getMSC() throws HiveException {
  try {
    return hive.getMSC();
  } catch (MetaException ex) {
    throw new HiveException(ex);
  }
}
public boolean deleteTableColumnStatistics(String dbName, String tableName, String colName)
    throws HiveException {
  try {
    return getMSC().deleteTableColumnStatistics(dbName, tableName, colName);
  } catch (Exception e) {
    LOG.debug(StringUtils.stringifyException(e));
    throw new HiveException(e);
  }
}
@Override
@SuppressWarnings("unchecked")
public void process(Object row, int tag) throws HiveException {
  try {
    res.add(fetcher.convert(row, inputObjInspectors[0]));
    numRows++;
    runTimeNumRows++;
  } catch (Exception e) {
    throw new HiveException(e);
  }
}
@Override
public void merge(AggregationBuffer agg, Object partial) throws HiveException {
  if (partial == null) {
    return;
  }
  NGramAggBuf myagg = (NGramAggBuf) agg;

  List partialNGrams = (List) loi.getList(partial);
  int n = Integer.parseInt(partialNGrams.get(partialNGrams.size() - 1).toString());

  // A value of 0 for n indicates that the mapper processed data that does not meet
  // the filter criteria, so merge() should be a no-op.
  if (n == 0) {
    return;
  }

  if (myagg.n > 0 && myagg.n != n) {
    throw new HiveException(getClass().getSimpleName() + ": mismatch in value for 'n'"
        + ", which usually is caused by a non-constant expression. Found '" + n + "' and '"
        + myagg.n + "'.");
  }
  myagg.n = n;

  // Strip the trailing 'n' before handing the serialized n-grams to the estimator.
  partialNGrams.remove(partialNGrams.size() - 1);
  myagg.nge.merge(partialNGrams);
}
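// For context: merge() above relies on the partial aggregation appending 'n' as
// the final element of the serialized n-gram list. A minimal sketch of that
// terminatePartial() counterpart follows, assuming the estimator exposes a
// serialize() method returning the list of Text values; treat that call and the
// exact layout as assumptions, not the verbatim Hive implementation.
@Override
public Object terminatePartial(AggregationBuffer agg) throws HiveException {
  NGramAggBuf myagg = (NGramAggBuf) agg;
  ArrayList<Text> result = myagg.nge.serialize();   // assumed estimator serializer
  result.add(new Text(Integer.toString(myagg.n)));  // trailing 'n' stripped by merge()
  return result;
}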
public List<Partition> dropPartitions(String dbName, String tblName,
    List<DropTableDesc.PartSpec> partSpecs, PartitionDropOptions dropOptions)
    throws HiveException {
  try {
    Table tbl = getTable(dbName, tblName);
    List<org.apache.hadoop.hive.metastore.utils.ObjectPair<Integer, byte[]>> partExprs =
        new ArrayList<>(partSpecs.size());
    for (DropTableDesc.PartSpec partSpec : partSpecs) {
      partExprs.add(new org.apache.hadoop.hive.metastore.utils.ObjectPair<>(
          partSpec.getPrefixLength(),
          SerializationUtilities.serializeExpressionToKryo(partSpec.getPartSpec())));
    }
    List<org.apache.hadoop.hive.metastore.api.Partition> tParts =
        getMSC().dropPartitions(dbName, tblName, partExprs, dropOptions);
    return convertFromMetastore(tbl, tParts);
  } catch (NoSuchObjectException e) {
    throw new HiveException("Partition or table doesn't exist.", e);
  } catch (Exception e) {
    throw new HiveException(e.getMessage(), e);
  }
}
@Override
public void initializeLocalWork(Configuration hconf) throws HiveException {
  // Find the first non-null parent; fail fast if there is none.
  Operator<? extends OperatorDesc> parent = null;
  for (Operator<? extends OperatorDesc> parentOp : parentOperators) {
    if (parentOp != null) {
      parent = parentOp;
      break;
    }
  }
  if (parent == null) {
    throw new HiveException("No valid parents.");
  }

  if (parentOperators.size() == 1) {
    // Wire up any unattached dummy-store operators from the Tez context as
    // additional parents of this operator.
    Map<Integer, DummyStoreOperator> dummyOps =
        ((TezContext) (MapredContext.get())).getDummyOpsMap();
    for (Entry<Integer, DummyStoreOperator> connectOp : dummyOps.entrySet()) {
      if (connectOp.getValue().getChildOperators() == null
          || connectOp.getValue().getChildOperators().isEmpty()) {
        parentOperators.add(connectOp.getKey(), connectOp.getValue());
        connectOp.getValue().getChildOperators().add(this);
      }
    }
  }
  super.initializeLocalWork(hconf);
}
private boolean processKey(byte alias, List<Object> key) throws HiveException {
  List<Object> keyWritable = keyWritables[alias];
  if (keyWritable == null) {
    // The first group.
    keyWritables[alias] = key;
    keyComparators[alias] = new WritableComparator[key.size()];
    return false;
  } else {
    int cmp = compareKeys(alias, key, keyWritable);
    if (cmp != 0) {
      // Can't overwrite existing keys.
      if (nextKeyWritables[alias] != null) {
        throw new HiveException("Attempting to overwrite nextKeyWritables[" + alias + "]");
      }
      nextKeyWritables[alias] = key;
      return true;
    }
    return false;
  }
}
@Override
public void process(Object row, int tag) throws HiveException {
  if (isSelectStarNoCompute) {
    forward(row, inputObjInspectors[tag]);
    return;
  }
  int i = 0;
  try {
    for (; i < eval.length; ++i) {
      output[i] = eval[i].evaluate(row);
    }
  } catch (HiveException e) {
    throw e;
  } catch (RuntimeException e) {
    throw new HiveException("Error evaluating " + conf.getColList().get(i).getExprString(), e);
  }
  forward(output, outputObjInspector);
}
public List<Partition> getPartitions(Table tbl, Map<String, String> partialPartSpec, short limit)
    throws HiveException {
  if (!tbl.isPartitioned()) {
    throw new HiveException(ErrorMsg.TABLE_NOT_PARTITIONED, tbl.getTableName());
  }

  List<String> partialPvals = MetaStoreUtils.getPvals(tbl.getPartCols(), partialPartSpec);

  List<org.apache.hadoop.hive.metastore.api.Partition> partitions = null;
  try {
    partitions = getMSC().listPartitionsWithAuthInfo(tbl.getDbName(), tbl.getTableName(),
        partialPvals, limit, getUserName(), getGroupNames());
  } catch (Exception e) {
    throw new HiveException(e);
  }

  List<Partition> qlPartitions = new ArrayList<Partition>(partitions.size());
  for (org.apache.hadoop.hive.metastore.api.Partition p : partitions) {
    qlPartitions.add(new Partition(tbl, p));
  }
  return qlPartitions;
}
private static class ThreadLocalHive extends ThreadLocal<Hive> {
  @Override
  protected Hive initialValue() {
    return null;
  }

  @Override
  public synchronized void set(Hive hiveObj) {
    Hive currentHive = this.get();
    if (currentHive != hiveObj) {
      // Remove/close current thread-local Hive object before overwriting with new Hive object.
      remove();
      super.set(hiveObj);
    }
  }

  @Override
  public synchronized void remove() {
    Hive currentHive = this.get();
    if (currentHive != null) {
      // Close the metastore connections before removing it from thread local hiveDB.
      currentHive.close(false);
      super.remove();
    }
  }
}
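// Illustrative lifecycle for the holder above. Hive.java keeps such a field
// named hiveDB, but this call site is a hypothetical sketch: set() closes and
// replaces any previous thread-local instance, remove() closes the thread's
// metastore connection before clearing the slot.
private static final ThreadLocalHive hiveDB = new ThreadLocalHive();

private static void exampleLifecycle(Hive newHive) {
  hiveDB.set(newHive);        // closes any prior Hive bound to this thread, then stores newHive
  Hive current = hiveDB.get();
  // ... use current ...
  hiveDB.remove();            // closes the metastore connection and clears the thread-local
}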
public boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request)
    throws HiveException {
  try {
    ColumnStatistics colStat = request.getColStats().get(0);
    ColumnStatisticsDesc statsDesc = colStat.getStatsDesc();
    Table tbl = getTable(statsDesc.getDbName(), statsDesc.getTableName());

    AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl, true);
    request.setValidWriteIdList(tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null);
    request.setWriteId(tableSnapshot != null ? tableSnapshot.getWriteId() : 0);
    return getMSC().setPartitionColumnStatistics(request);
  } catch (Exception e) {
    LOG.debug(StringUtils.stringifyException(e));
    throw new HiveException(e);
  }
}
/**
 * Clear the context, if anything needs to be done.
 */
public void clearFetchContext() throws HiveException {
  try {
    if (currRecReader != null) {
      currRecReader.close();
      currRecReader = null;
    }
    closeOperator();
    if (context != null) {
      context.clear();
      context = null;
    }
    this.currPath = null;
    this.iterPath = null;
    this.iterPartDesc = null;
    this.iterSplits = Collections.emptyIterator();
  } catch (Exception e) {
    throw new HiveException("Failed with exception " + e.getMessage()
        + StringUtils.stringifyException(e));
  }
}
private List<SparkStageInfo> getSparkStagesInfo() throws HiveException {
  Integer sparkJobId = jobHandle.getSparkJobIds().size() == 1
      ? jobHandle.getSparkJobIds().get(0) : null;
  if (sparkJobId == null) {
    return null;
  }
  Future<ArrayList<SparkStageInfo>> getStagesInfo = sparkClient.run(
      new GetSparkStagesInfoJob(jobHandle.getClientJobId(), sparkJobId));
  try {
    return getStagesInfo.get(sparkClientTimeoutInSeconds, TimeUnit.SECONDS);
  } catch (TimeoutException e) {
    throw new HiveException(e, ErrorMsg.SPARK_GET_STAGES_INFO_TIMEOUT,
        Long.toString(sparkClientTimeoutInSeconds));
  } catch (InterruptedException e) {
    throw new HiveException(e, ErrorMsg.SPARK_GET_STAGES_INFO_INTERRUPTED);
  } catch (ExecutionException e) {
    throw new HiveException(e, ErrorMsg.SPARK_GET_STAGES_INFO_EXECUTIONERROR,
        Throwables.getRootCause(e).getMessage());
  }
}
/**
 * Get all the partitions; unlike {@link #getPartitions(Table)}, does not include auth.
 * @param tbl table for which partitions are needed
 * @return list of partition objects
 */
public Set<Partition> getAllPartitionsOf(Table tbl) throws HiveException {
  if (!tbl.isPartitioned()) {
    return Sets.newHashSet(new Partition(tbl));
  }

  List<org.apache.hadoop.hive.metastore.api.Partition> tParts;
  try {
    tParts = getMSC().listPartitions(tbl.getDbName(), tbl.getTableName(), (short) -1);
  } catch (Exception e) {
    LOG.error(StringUtils.stringifyException(e));
    throw new HiveException(e);
  }

  Set<Partition> parts = new LinkedHashSet<Partition>(tParts.size());
  for (org.apache.hadoop.hive.metastore.api.Partition tpart : tParts) {
    parts.add(new Partition(tbl, tpart));
  }
  return parts;
}
public static void writeMmCommitManifest(List<Path> commitPaths, Path specPath, FileSystem fs,
    String taskId, Long writeId, int stmtId, String unionSuffix, boolean isInsertOverwrite)
    throws HiveException {
  if (commitPaths.isEmpty()) {
    return;
  }
  // We assume one FSOP per task (per specPath), so we create it in specPath.
  Path manifestPath = getManifestDir(specPath, writeId, stmtId, unionSuffix, isInsertOverwrite);
  manifestPath = new Path(manifestPath, taskId + MANIFEST_EXTENSION);
  Utilities.FILE_OP_LOGGER.info("Writing manifest to {} with {}", manifestPath, commitPaths);
  try {
    // Don't overwrite the manifest... should fail if we have collisions.
    try (FSDataOutputStream out = fs.create(manifestPath, false)) {
      if (out == null) {
        throw new HiveException("Failed to create manifest at " + manifestPath);
      }
      out.writeInt(commitPaths.size());
      for (Path path : commitPaths) {
        out.writeUTF(path.toString());
      }
    }
  } catch (IOException e) {
    throw new HiveException(e);
  }
}
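// A minimal reader sketch mirroring the manifest layout written above: an int
// count followed by one writeUTF'd path per committed file. The method name and
// its presence in Hive are assumptions for illustration, not the real API.
private static List<Path> readMmCommitManifest(FileSystem fs, Path manifestPath)
    throws HiveException {
  try (FSDataInputStream in = fs.open(manifestPath)) {
    int count = in.readInt();                       // number of committed paths
    List<Path> commitPaths = new ArrayList<>(count);
    for (int i = 0; i < count; ++i) {
      commitPaths.add(new Path(in.readUTF()));      // one UTF-encoded path per entry
    }
    return commitPaths;
  } catch (IOException e) {
    throw new HiveException(e);
  }
}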
public void abortTransactions(List<Long> txnids) throws HiveException {
  try {
    getMSC().abortTxns(txnids);
  } catch (Exception e) {
    LOG.error(StringUtils.stringifyException(e));
    throw new HiveException(e);
  }
}
public boolean dropPartition(String dbName, String tableName, List<String> partVals,
    PartitionDropOptions options) throws HiveException {
  try {
    return getMSC().dropPartition(dbName, tableName, partVals, options);
  } catch (NoSuchObjectException e) {
    throw new HiveException("Partition or table doesn't exist.", e);
  } catch (Exception e) {
    throw new HiveException(e.getMessage(), e);
  }
}
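// Hypothetical call site for dropPartition(). PartitionDropOptions.instance()
// with chained deleteData()/ifExists() setters is the metastore's builder API;
// the 'db' handle, table name, and partition value here are illustrative only.
PartitionDropOptions options = PartitionDropOptions.instance()
    .deleteData(true)   // remove the partition's data, not just its metadata
    .ifExists(true);    // don't fail if the partition is already gone
boolean dropped = db.dropPartition("default", "page_views",
    Arrays.asList("2018-01-01"), options);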
private SparkJobInfo getSparkJobInfo() throws HiveException {
  Integer sparkJobId = jobHandle.getSparkJobIds().size() == 1
      ? jobHandle.getSparkJobIds().get(0) : null;
  if (sparkJobId == null) {
    return null;
  }
  Future<SparkJobInfo> getJobInfo = sparkClient.run(
      new GetJobInfoJob(jobHandle.getClientJobId(), sparkJobId));
  try {
    return getJobInfo.get(sparkClientTimeoutInSeconds, TimeUnit.SECONDS);
  } catch (TimeoutException e) {
    throw new HiveException(e, ErrorMsg.SPARK_GET_JOB_INFO_TIMEOUT,
        Long.toString(sparkClientTimeoutInSeconds));
  } catch (InterruptedException e) {
    throw new HiveException(e, ErrorMsg.SPARK_GET_JOB_INFO_INTERRUPTED);
  } catch (ExecutionException e) {
    throw new HiveException(e, ErrorMsg.SPARK_GET_JOB_INFO_EXECUTIONERROR,
        Throwables.getRootCause(e).getMessage());
  }
}