@Override
protected AggrStats getJdoResult(GetHelper<AggrStats> ctx)
    throws MetaException, NoSuchObjectException {
  // This is the fast path for query optimizations: if we can find this info
  // quickly using directSql, do it. No point in falling back to the slow
  // (JDO) path here, so it is deliberately unimplemented.
  throw new MetaException("Jdo path is not implemented for stats aggr.");
}

@Override
public void setError(String errorMessage) throws MetaException {
  // Record the error; when the caller demanded a clean run (expectNoErrors),
  // fail fast by surfacing it as a MetaException immediately.
  this.errorMessage = errorMessage;
  if (expectNoErrors) {
    throw new MetaException(errorMessage);
  }
}
}
/**
 * Verifies that a raw query result is a {@code List} and casts it to the
 * row-array form used by the direct-SQL path.
 *
 * @param result the raw result object returned by the query layer; may be null
 * @return the result cast to {@code List<Object[]>}
 * @throws MetaException if the result is null or not a {@code List}
 */
@SuppressWarnings("unchecked")
static List<Object[]> ensureList(Object result) throws MetaException {
  if (!(result instanceof List<?>)) {
    // Guard against null before calling getClass(): the previous code threw
    // NullPointerException for a null result instead of the intended
    // MetaException (instanceof is false for null).
    throw new MetaException(
        "Wrong result type " + (result == null ? "null" : result.getClass()));
  }
  return (List<Object[]>) result;
}
/**
 * Creates the directory at {@code path} (including missing parents) if it
 * does not already exist.
 *
 * @param path directory to create
 * @param conf configuration used to resolve the FileSystem for the path
 * @throws MetaException if the filesystem cannot be reached or the directory
 *         cannot be created; carries the underlying IOException as its cause
 */
public static void makeDir(Path path, Configuration conf) throws MetaException {
  try {
    FileSystem fs = path.getFileSystem(conf);
    if (!fs.exists(path)) {
      fs.mkdirs(path);
    }
  } catch (IOException e) {
    // The previous message ("Unable to : " + path) was truncated and the
    // IOException was discarded; include the detail and chain the cause.
    MetaException me =
        new MetaException("Unable to create directory " + path + ": " + e.getMessage());
    me.initCause(e);
    throw me;
  }
}
/**
 * Validates that the builder has enough state to construct the constraint and
 * fills in defaults for any missing constraint or catalog name.
 *
 * @param defaultConstraintName suffix used when no explicit constraint name was set
 * @param conf configuration used to look up the default catalog
 * @throws MetaException if no table name or no columns were supplied
 */
protected void checkBuildable(String defaultConstraintName, Configuration conf) throws MetaException {
  if (tableName == null || columns.isEmpty()) {
    throw new MetaException("You must provide table name and columns");
  }
  // The two defaults below are independent of each other.
  if (catName == null) {
    catName = MetaStoreUtils.getDefaultCatalog(conf);
  }
  if (constraintName == null) {
    constraintName = tableName + "_" + defaultConstraintName;
  }
}
/**
 * Converts an arbitrary exception into a MetaException, preserving the
 * original as the cause. A MetaException is returned unchanged.
 */
private static MetaException newMetaException(Exception e) {
  if (e instanceof MetaException) {
    return (MetaException) e;
  }
  MetaException wrapped = new MetaException(e.toString());
  wrapped.initCause(e);
  return wrapped;
}
public static int getArchivingLevel(Partition part) throws MetaException { if (!isArchived(part)) { throw new MetaException("Getting level of unarchived partition"); } String lv = part.getParameters().get(ARCHIVING_LEVEL); if (lv != null) { return Integer.parseInt(lv); } // partitions archived before introducing multiple archiving return part.getValues().size(); }
/**
 * Looks up a metastore configuration value by key, falling back to the
 * variable's default when the key is unset in the current configuration.
 *
 * @param key the metastore configuration key
 * @return the configured value, or the variable's default value
 * @throws MetaException if {@code key} is not a recognized metastore variable
 */
@Override
public String getMetaConf(String key) throws MetaException {
  ConfVars confVar = MetastoreConf.getMetaConf(key);
  if (confVar == null) {
    throw new MetaException("Invalid configuration key " + key);
  }
  String defaultValue = confVar.getDefaultVal().toString();
  return getConf().get(key, defaultValue);
}
@Override
public Map<String, Type> get_type_all(String name) throws MetaException {
  // TODO Auto-generated method stub — this Thrift endpoint is a stub and
  // always fails.
  startFunction("get_type_all", ": " + name);
  // endFunction runs before the throw so the start/end bookkeeping is
  // balanced even though the call never succeeds.
  endFunction("get_type_all", false, null);
  throw new MetaException("Not yet implemented");
}
/**
 * Converts a HiveException into a MetaException, keeping the original
 * exception as the cause.
 */
private MetaException metaException(HiveException e) {
  MetaException converted = new MetaException(e.getMessage());
  converted.initCause(e);
  return converted;
}
/**
 * Recycles {@code file} into the change-management root via a copy.
 *
 * @param file the path to recycle
 * @throws MetaException if the recycle fails; the message carries the full
 *         stringified stack trace and the IOException is chained as the cause
 */
void addToChangeManagement(Path file) throws MetaException {
  try {
    cm.recycle(file, RecycleType.COPY, true);
  } catch (IOException e) {
    // Keep the stringified trace in the message (as before) but also chain
    // the cause, which was previously dropped.
    MetaException me =
        new MetaException(org.apache.hadoop.util.StringUtils.stringifyException(e));
    me.initCause(e);
    throw me;
  }
}
/**
 * Moves {@code f} into the change-management root.
 *
 * @param f the path to recycle
 * @param ifPurge purge flag forwarded to the change manager
 * @throws MetaException if the recycle fails; the message carries the full
 *         stringified stack trace and the IOException is chained as the cause
 */
public void recycleDirToCmPath(Path f, boolean ifPurge) throws MetaException {
  try {
    cm.recycle(f, RecycleType.MOVE, ifPurge);
  } catch (IOException e) {
    // Keep the stringified trace in the message (as before) but also chain
    // the cause, which was previously dropped.
    MetaException me =
        new MetaException(org.apache.hadoop.util.StringUtils.stringifyException(e));
    me.initCause(e);
    throw me;
  }
}
@Override public void preAlterTable(Table table, EnvironmentContext context) throws MetaException { String alterOpType = context == null ? null : context.getProperties().get(ALTER_TABLE_OPERATION_TYPE); // alterOpType is null in case of stats update if (alterOpType != null && !ALLOWED_ALTER_TYPES.contains(alterOpType)) { throw new MetaException("ALTER TABLE can not be used for " + alterOpType + " to a non-native table "); } if (DruidKafkaUtils.isKafkaStreamingTable(table)) { updateKafkaIngestion(table); } }
/**
 * Fetches the single partition named by the compaction info through the
 * metastore client.
 *
 * @param ci compaction info identifying catalog, db, table and partition name
 * @return the matching partitions (at most one)
 * @throws MetaException if the metastore call fails; the Thrift exception is
 *         chained as the cause
 */
@Override
List<Partition> getPartitionsByNames(CompactionInfo ci) throws MetaException {
  try {
    return msc.getPartitionsByNames(getDefaultCatalog(conf), ci.dbname, ci.tableName,
        Collections.singletonList(ci.partName));
  } catch (TException e) {
    // Pass the exception to the logger so the stack trace is not lost, and
    // chain it as the cause of the thrown MetaException (both were
    // previously dropped).
    LOG.error("Unable to get partitions by name for CompactionInfo=" + ci, e);
    MetaException me = new MetaException(e.toString());
    me.initCause(e);
    throw me;
  }
}
}
/**
 * Fails with a MetaException when the underlying table has no partition keys.
 */
private void assertPartitioned() throws MetaException {
  if (tTable.getPartitionKeysSize() <= 0) {
    String qualifiedName = Warehouse.getQualifiedName(tTable);
    throw new MetaException(qualifiedName + " is not partitioned");
  }
}
/**
 * Enforces the configured partition-request limit for a table.
 *
 * @param tblName table name used in the error message
 * @param numPartitions total number of partitions that would be returned
 * @param maxToFetch caller's requested maximum; negative means "fetch all"
 * @throws MetaException if the effective request exceeds the configured limit
 */
private void checkLimitNumberOfPartitions(String tblName, int numPartitions, int maxToFetch) throws MetaException {
  if (!isPartitionLimitEnabled()) {
    return;
  }
  int partitionLimit = MetastoreConf.getIntVar(conf, ConfVars.LIMIT_PARTITION_REQUEST);
  // A negative maxToFetch means the caller wants everything, so the full
  // partition count is the effective request size.
  int partitionRequest = (maxToFetch < 0) ? numPartitions : maxToFetch;
  if (partitionRequest > partitionLimit) {
    String configName = ConfVars.LIMIT_PARTITION_REQUEST.toString();
    throw new MetaException(String.format(PARTITION_NUMBER_EXCEED_LIMIT_MSG, partitionRequest,
        tblName, partitionLimit, configName));
  }
}
/**
 * Resolves the table referenced by the compaction info through the metastore
 * client.
 *
 * @param ci compaction info identifying catalog, db and table
 * @return the resolved table
 * @throws MetaException if the metastore call fails; the Thrift exception is
 *         chained as the cause
 */
@Override
Table resolveTable(CompactionInfo ci) throws MetaException {
  try {
    return msc.getTable(getDefaultCatalog(conf), ci.dbname, ci.tableName);
  } catch (TException e) {
    // Pass the exception to the logger so the stack trace is not lost, and
    // chain it as the cause of the thrown MetaException (both were
    // previously dropped).
    LOG.error("Unable to find table " + ci.getFullTableName() + ", " + e.getMessage(), e);
    MetaException me = new MetaException(e.toString());
    me.initCause(e);
    throw me;
  }
}
/**
 * Deletes {@code f}, optionally recycling it into the change-management root
 * (a move) first.
 *
 * @param f path to delete
 * @param recursive whether to delete recursively
 * @param ifPurge purge flag forwarded to the recycle and delete operations
 * @param needCmRecycle whether to recycle into change management before deleting
 * @return the result of the filesystem handler's delete
 * @throws MetaException if the recycle fails; the message carries the full
 *         stringified stack trace and the IOException is chained as the cause
 */
public boolean deleteDir(Path f, boolean recursive, boolean ifPurge, boolean needCmRecycle) throws MetaException {
  if (needCmRecycle) {
    try {
      cm.recycle(f, RecycleType.MOVE, ifPurge);
    } catch (IOException e) {
      // Keep the stringified trace in the message (as before) but also chain
      // the cause, which was previously dropped.
      MetaException me =
          new MetaException(org.apache.hadoop.util.StringUtils.stringifyException(e));
      me.initCause(e);
      throw me;
    }
  }
  FileSystem fs = getFs(f);
  return fsHandler.deleteDir(fs, f, recursive, ifPurge, conf);
}
/**
 * Adds the partitions described by a partition-spec proxy, defaulting the
 * catalog name when the caller did not set one.
 *
 * @param partitionSpec proxy describing the partitions to add; must not be null
 * @return the number of partitions added, as reported by the metastore
 * @throws TException on any metastore failure (MetaException if the spec is null)
 */
@Override
public int add_partitions_pspec(PartitionSpecProxy partitionSpec) throws TException {
  if (partitionSpec == null) {
    throw new MetaException("PartitionSpec cannot be null.");
  }
  if (partitionSpec.getCatName() == null) {
    // Fill in the default catalog for callers that did not specify one.
    partitionSpec.setCatName(getDefaultCatalog(conf));
  }
  return client.add_partitions_pspec(partitionSpec.toPartitionSpec());
}
/**
 * Adds a notification event, first letting an injected test modifier veto the
 * operation: a modifier returning {@code false} aborts with a MetaException,
 * while {@code null} or {@code true} lets the event through.
 *
 * @param entry the notification event to record
 * @throws MetaException if an installed modifier rejects the event
 */
@Override
public void addNotificationEvent(NotificationEvent entry) throws MetaException {
  if (addNotificationEventModifier != null) {
    Boolean success = addNotificationEventModifier.apply(entry);
    // Equivalent to (success != null && !success): only an explicit false vetoes.
    if (Boolean.FALSE.equals(success)) {
      throw new MetaException("InjectableBehaviourObjectStore: Invalid addNotificationEvent operation on DB: "
          + entry.getDbName() + " table: " + entry.getTableName()
          + " event : " + entry.getEventType());
    }
  }
  super.addNotificationEvent(entry);
}