/**
 * Is this an external table?
 *
 * @param table the table to check
 * @return true if the table is external, otherwise false
 */
private boolean isExternal(Table table) {
  return MetaStoreUtils.isExternalTable(table);
}
@Override
public void dropConstraint(String dbName, String tableName, String constraintName)
    throws TException {
  dropConstraint(getDefaultCatalog(conf), dbName, tableName, constraintName);
}
/**
 * Generate the table descriptor for intermediate files.
 */
public static TableDesc getIntermediateFileTableDesc(List<FieldSchema> fieldSchemas) {
  return new TableDesc(SequenceFileInputFormat.class, SequenceFileOutputFormat.class,
      Utilities.makeProperties(
          serdeConstants.LIST_COLUMNS,
          MetaStoreUtils.getColumnNamesFromFieldSchema(fieldSchemas),
          serdeConstants.COLUMN_NAME_DELIMITER,
          MetaStoreUtils.getColumnNameDelimiter(fieldSchemas),
          serdeConstants.LIST_COLUMN_TYPES,
          MetaStoreUtils.getColumnTypesFromFieldSchema(fieldSchemas),
          serdeConstants.ESCAPE_CHAR, "\\",
          serdeConstants.SERIALIZATION_LIB, LazyBinarySerDe.class.getName()));
}
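// Hypothetical usage sketch (the column names and types below are illustrative,
// not from the source): describe a two-column intermediate result. The returned
// descriptor pairs SequenceFile input/output formats with LazyBinarySerDe, which
// is what the SERIALIZATION_LIB property above selects.
List<FieldSchema> intermediateSchema = Arrays.asList(
    new FieldSchema("_col0", "bigint", null),
    new FieldSchema("_col1", "string", null));
TableDesc intermediateDesc = getIntermediateFileTableDesc(intermediateSchema);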
@Override
public List<String> getAllTables(String dbname) throws MetaException {
  try {
    return getAllTables(getDefaultCatalog(conf), dbname);
  } catch (Exception e) {
    MetaStoreUtils.logAndThrowMetaException(e);
  }
  return null;
}
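// Hypothetical caller sketch, assuming an already-opened IMetaStoreClient named
// "client": enumerate every table in the default database.
for (String tableName : client.getAllTables("default")) {
  System.out.println(tableName);
}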
@Override
public List<String> getMaterializedViewsForRewriting(String catName, String dbname)
    throws MetaException {
  try {
    List<String> views = client.get_materialized_views_for_rewriting(
        prependCatalogToDbName(catName, dbname, conf));
    return FilterUtils.filterTableNamesIfEnabled(isClientFilterEnabled, filterHook,
        catName, dbname, views);
  } catch (Exception e) {
    MetaStoreUtils.logAndThrowMetaException(e);
  }
  return null;
}
RawStore ms = getMS();
String dbName = request.getDbName(), tblName = request.getTblName();
String catName = request.isSetCatName() ? request.getCatName() : getDefaultCatalog(conf);
boolean ifExists = request.isSetIfExists() && request.isIfExists();
boolean deleteData = request.isSetDeleteData() && request.isDeleteData();
// ...
if (MetaStoreUtils.isArchived(part)
    && MetaStoreUtils.getArchivingLevel(part) < expr.getPartArchiveLevel()) {
  throw new MetaException("Cannot drop a subset of partitions "
      + " in an archive, partition " + part);
}
// ...
if (MetaStoreUtils.isArchived(part)) {
  Path archiveParentDir = MetaStoreUtils.getOriginalLocation(part);
  verifyIsWritablePath(archiveParentDir);
  archToDelete.add(archiveParentDir);
}
/**
 * Helper that converts an IOException into a MetaException.
 */
public static FileSystem getFs(Path f, Configuration conf) throws MetaException {
  try {
    return f.getFileSystem(conf);
  } catch (IOException e) {
    MetaStoreUtils.logAndThrowMetaException(e);
  }
  return null;
}
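// Sketch of a typical caller (the path is an illustrative assumption, and "conf"
// is an available Configuration): resolve the FileSystem for a warehouse
// location, letting getFs translate any IOException into a MetaException.
Path tablePath = new Path("/user/hive/warehouse/t1");
FileSystem fs = getFs(tablePath, conf);
if (!fs.exists(tablePath)) {
  fs.mkdirs(tablePath);
}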
@Override
public void rollbackCreateTable(Table table) throws MetaException {
  String tableName = getHBaseTableName(table);
  boolean isPurge = !MetaStoreUtils.isExternalTable(table)
      || MetaStoreUtils.isExternalTablePurge(table);
  try {
    if (isPurge && getHBaseAdmin().tableExists(TableName.valueOf(tableName))) {
      // We created this HBase table, so delete it to roll back.
      if (getHBaseAdmin().isTableEnabled(TableName.valueOf(tableName))) {
        getHBaseAdmin().disableTable(TableName.valueOf(tableName));
      }
      getHBaseAdmin().deleteTable(TableName.valueOf(tableName));
    }
  } catch (IOException ie) {
    throw new MetaException(StringUtils.stringifyException(ie));
  }
}
private static Properties getSerdeProperties(HCatTableInfo info, HCatSchema s)
    throws SerDeException {
  Properties props = new Properties();
  List<FieldSchema> fields = HCatUtil.getFieldSchemaList(s.getFields());
  props.setProperty(org.apache.hadoop.hive.serde.serdeConstants.LIST_COLUMNS,
      MetaStoreUtils.getColumnNamesFromFieldSchema(fields));
  props.setProperty(serdeConstants.COLUMN_NAME_DELIMITER,
      MetaStoreUtils.getColumnNameDelimiter(fields));
  props.setProperty(org.apache.hadoop.hive.serde.serdeConstants.LIST_COLUMN_TYPES,
      MetaStoreUtils.getColumnTypesFromFieldSchema(fields));
  props.setProperty("columns.comments",
      MetaStoreUtils.getColumnCommentsFromFieldSchema(fields));

  // Set these properties to match LazySimpleSerDe.
  props.setProperty(org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_NULL_FORMAT, "\\N");
  props.setProperty(org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT, "1");

  // Add properties from the params set in the table schema.
  props.putAll(info.getStorerInfo().getProperties());
  return props;
}
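// Sketch, assuming the classic two-argument AbstractSerDe.initialize signature
// and HCatTableInfo/HCatSchema instances ("info", "schema") obtained elsewhere:
// the Properties computed above can seed a LazySimpleSerDe directly.
Properties serdeProps = getSerdeProperties(info, schema);
LazySimpleSerDe serde = new LazySimpleSerDe();
serde.initialize(conf, serdeProps);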
String sourceDbName, String sourceTableName, String destDbName,
    String destTableName) throws TException {
  String[] parsedDestDbName = parseDbName(destDbName, conf);
  String[] parsedSourceDbName = parseDbName(sourceDbName, conf);
  // ...
  List<String> partVals = MetaStoreUtils.getPvals(sourceTable.getPartitionKeys(),
      partitionSpecs);
  List<String> partValsPresent = new ArrayList<>();
  // ... (an exception throw is elided here; its message ends with
  // " for the table " + sourceTableName)
  boolean sameColumns = MetaStoreUtils.compareFieldColumns(
      sourceTable.getSd().getCols(), destinationTable.getSd().getCols());
  boolean samePartitions = MetaStoreUtils.compareFieldColumns(
      sourceTable.getPartitionKeys(), destinationTable.getPartitionKeys());
  if (!sameColumns || !samePartitions) {
      // ... (delegation-token branch elided)
      MetaStoreUtils.getMetaStoreSaslProperties(conf, useSSL));
} else {
  LOG.debug("HMSC::open(): Could not find delegation token. "
      + "Creating KERBEROS-based thrift connection.");
  transport = authBridge.createClientTransport(principalConfig, store.getHost(),
      "KERBEROS", null, transport,
      MetaStoreUtils.getMetaStoreSaslProperties(conf, useSSL));
String newDbName = newt.getDbName().toLowerCase();
if (!MetaStoreUtils.validateName(newTblName, handler.getConf())) {
  throw new InvalidOperationException(newTblName + " is not a valid object name");
}
// ... (the leading clauses of this rename condition are elided)
if (/* ... */
    && (oldt.getSd().getLocation().compareTo(newt.getSd().getLocation()) == 0
        || StringUtils.isEmpty(newt.getSd().getLocation()))
    && !MetaStoreUtils.isExternalTable(oldt)) {
  Database olddb = msdb.getDatabase(catName, dbname);
if (MetaStoreUtils.isExternalTable(tbl.getTTable())) {
  Logger.info("Table " + tbl.getTableName() + " is external. Skip StatsOptimizer.");
  return null;
}
if (MetaStoreUtils.isNonNativeTable(tbl.getTTable())) {
  Logger.info("Table " + tbl.getTableName() + " is a non-native table. Skip StatsOptimizer.");
  return null;
}
public HiveLockObject(Table tbl, HiveLockObjectData lockData) {
  this(new String[] {tbl.getDbName(),
      org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.encodeTableName(tbl.getTableName())},
      lockData);
}
public static Path getOriginalLocation(Partition part) {
  Map<String, String> params = part.getParameters();
  assert isArchived(part);
  String originalLocation = params.get(hive_metastoreConstants.ORIGINAL_LOCATION);
  assert originalLocation != null;
  return new Path(originalLocation);
}
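// Usage sketch (the partition object is assumed to come from the metastore):
// recover the pre-archive directory of an archived partition before touching
// its data, as the drop-partition paths above do.
if (isArchived(part)) {
  Path originalDir = getOriginalLocation(part);
  // originalDir is where the partition's files lived before ARCHIVE ran.
}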
Table table = rawStore.getTable(catName, dbName, tblName);
List<String> partNames = rawStore.listPartitionNames(catName, dbName, tblName, (short) -1);
List<String> colNames = MetaStoreUtils.getColumnNamesForTable(table);
if ((partNames != null) && (partNames.size() > 0)) {
  Deadline.startTimer("getAggregateStatsForAllPartitions");
public TableWrapper(org.apache.hadoop.hive.metastore.api.Table apiTable) {
  org.apache.hadoop.hive.metastore.api.Table wrapperApiTable = apiTable.deepCopy();
  if (wrapperApiTable.getTableType() == null) {
    // TableType was not set; infer it from the table's other fields.
    if (MetaStoreUtils.isExternalTable(wrapperApiTable)) {
      wrapperApiTable.setTableType(TableType.EXTERNAL_TABLE.toString());
    } else if (MetaStoreUtils.isMaterializedViewTable(wrapperApiTable)) {
      wrapperApiTable.setTableType(TableType.MATERIALIZED_VIEW.toString());
    } else if ((wrapperApiTable.getSd() == null)
        || (wrapperApiTable.getSd().getLocation() == null)) {
      wrapperApiTable.setTableType(TableType.VIRTUAL_VIEW.toString());
    } else {
      wrapperApiTable.setTableType(TableType.MANAGED_TABLE.toString());
    }
  }
  initialize(wrapperApiTable);
}
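// Hypothetical sketch ("client" is an assumed IMetaStoreClient): wrap a thrift
// Table whose tableType was never recorded; unsetTableType() simulates such a
// legacy object, and the constructor above then infers EXTERNAL_TABLE,
// MATERIALIZED_VIEW, VIRTUAL_VIEW, or MANAGED_TABLE from the remaining fields.
org.apache.hadoop.hive.metastore.api.Table apiTable =
    client.getTable("default", "t1");
apiTable.unsetTableType();
TableWrapper wrapper = new TableWrapper(apiTable);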
isArchived = MetaStoreUtils.isArchived(part);
if (isArchived) {
  archiveParentDir = MetaStoreUtils.getOriginalLocation(part);
  verifyIsWritablePath(archiveParentDir);
/**
 * Convert a list of FieldSchemas to a delimited string of column names.
 */
public static String getColumnNamesFromFieldSchema(List<FieldSchema> fieldSchemas) {
  String delimiter = getColumnNameDelimiter(fieldSchemas);
  StringBuilder sb = new StringBuilder();
  for (int i = 0; i < fieldSchemas.size(); i++) {
    if (i > 0) {
      sb.append(delimiter);
    }
    sb.append(fieldSchemas.get(i).getName());
  }
  return sb.toString();
}
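// Minimal sketch with made-up columns: for names that contain no commas,
// getColumnNameDelimiter typically yields a comma, so this prints "id,name".
List<FieldSchema> cols = Arrays.asList(
    new FieldSchema("id", "int", null),
    new FieldSchema("name", "string", null));
System.out.println(getColumnNamesFromFieldSchema(cols)); // id,name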
/**
 * Returns the archiving level, i.e. how many partition fields were set in the
 * partial specification that ARCHIVE was run with.
 */
public static int getArchivingLevel(Partition p) throws HiveException {
  try {
    return MetaStoreUtils.getArchivingLevel(p.getTPartition());
  } catch (MetaException ex) {
    throw new HiveException(ex.getMessage(), ex);
  }
}
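// Sketch, assuming a table partitioned by (ds, hr) and "partition"/"table"
// objects obtained elsewhere: if ARCHIVE was run with only ds specified, the
// level is 1 and the archive spans every hr under that ds, which is why the
// drop-partition check earlier refuses to drop a subset of an archive.
int level = getArchivingLevel(partition);
if (level < table.getPartitionKeys().size()) {
  // The archive covers more than this one partition.
}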