/**
 * Supplies the client under test: a pre-catalog metastore client built from
 * the shared test configuration.
 *
 * @return a freshly constructed {@link HiveMetaStoreClientPreCatalog}
 * @throws MetaException if the client cannot be constructed
 */
@Override
protected IMetaStoreClient getClient() throws MetaException {
  final IMetaStoreClient preCatalogClient = new HiveMetaStoreClientPreCatalog(conf);
  return preCatalogClient;
}
/**
 * Adds a single partition by delegating to the {@code EnvironmentContext}-aware
 * overload with no context.
 *
 * @param new_part the partition to add
 * @return the added partition
 * @throws InvalidObjectException if the partition is malformed
 * @throws AlreadyExistsException if the partition already exists
 * @throws MetaException on metastore-side failure
 * @throws TException on Thrift transport failure
 * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#add_partition(org.apache.hadoop.hive.metastore.api.Partition)
 */
@Override
public Partition add_partition(Partition new_part) throws TException {
  // No per-call environment properties: pass a null context.
  final EnvironmentContext noContext = null;
  return add_partition(new_part, noContext);
}
/**
 * Alters a table by delegating to
 * {@code alter_table_with_environmentContext} with a null context.
 *
 * @param dbname database containing the table
 * @param tbl_name name of the table to alter
 * @param new_tbl the new table definition
 * @throws InvalidOperationException if the alteration is not permitted
 * @throws MetaException on metastore-side failure
 * @throws TException on Thrift transport failure
 * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#alter_table(java.lang.String,
 *      java.lang.String, org.apache.hadoop.hive.metastore.api.Table)
 */
@Override
public void alter_table(String dbname, String tbl_name, Table new_tbl)
    throws InvalidOperationException, MetaException, TException {
  // Null context: no extra properties accompany this alteration.
  alter_table_with_environmentContext(dbname, tbl_name, new_tbl, null);
}
filterHook = loadFilterHooks(); uriResolverHook = loadUriResolverHook(); fileMetadataBatchSize = MetastoreConf.getIntVar( conf, ConfVars.BATCH_RETRIEVE_OBJECTS_MAX); snapshotActiveConf(); return; resolveUris(); } else { LOG.error("NOT getting uris from conf"); String delegationTokenStr = getDelegationToken(proxyUser, proxyUser); SecurityUtils.setTokenStr(UserGroupInformation.getCurrentUser(), delegationTokenStr, delegationTokenPropString); MetastoreConf.setVar(this.conf, ConfVars.TOKEN_SIGNATURE, delegationTokenPropString); close(); } catch (Exception e) { LOG.error("Error while setting delegation token for " + proxyUser, e); open();
@Override public void reconnect() throws MetaException { if (localMetaStore) { // For direct DB connections we don't yet support reestablishing connections. throw new MetaException("For direct MetaStore DB connections, we don't support retries" + " at the client level."); } else { close(); if (uriResolverHook != null) { //for dynamic uris, re-lookup if there are new metastore locations resolveUris(); } if (MetastoreConf.getVar(conf, ConfVars.THRIFT_URI_SELECTION).equalsIgnoreCase("RANDOM")) { // Swap the first element of the metastoreUris[] with a random element from the rest // of the array. Rationale being that this method will generally be called when the default // connection has died and the default connection is likely to be the first array element. promoteRandomMetaStoreURI(); } open(); } }
/**
 * Appends a partition identified by its name string, delegating to the
 * {@code EnvironmentContext}-aware overload with no context.
 *
 * @param dbName database containing the table
 * @param tableName table to append the partition to
 * @param partName partition name string
 * @return the appended partition
 * @throws TException on Thrift transport or metastore failure
 */
@Override
public Partition appendPartition(String dbName, String tableName, String partName)
    throws TException {
  // Typed null selects the EnvironmentContext overload (instead of a cast).
  final EnvironmentContext noContext = null;
  return appendPartition(dbName, tableName, partName, noContext);
}
/**
 * Allocates write IDs for a batch of transactions against one table.
 *
 * @param txnIds transaction IDs that need write IDs
 * @param dbName database containing the table
 * @param tableName table the write IDs are allocated for
 * @return the txn-to-writeId mappings produced by the metastore
 * @throws TException on Thrift transport or metastore failure
 */
@Override
public List<TxnToWriteId> allocateTableWriteIdsBatch(List<Long> txnIds, String dbName,
    String tableName) throws TException {
  final AllocateTableWriteIdsRequest request =
      new AllocateTableWriteIdsRequest(dbName, tableName);
  request.setTxnIds(txnIds);
  return allocateTableWriteIdsBatchIntr(request);
}
/**
 * Creates a table by delegating to the two-argument overload with no
 * environment context.
 *
 * @param tbl the table definition to create
 * @throws AlreadyExistsException if the table already exists
 * @throws InvalidObjectException if the table definition is malformed
 * @throws MetaException on metastore-side failure
 * @throws NoSuchObjectException if the target database does not exist
 * @throws TException on Thrift transport failure
 * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_table(org.apache.hadoop.hive.metastore.api.Table)
 */
@Override
public void createTable(Table tbl) throws AlreadyExistsException, InvalidObjectException,
    MetaException, NoSuchObjectException, TException {
  // Null context: no extra properties accompany table creation.
  createTable(tbl, null);
}
/**
 * Appends a partition identified by its name string, delegating to the
 * context-aware overload with a null context.
 *
 * @param dbName database containing the table
 * @param tableName table to append the partition to
 * @param partName partition name string
 * @return the appended partition
 * @throws InvalidObjectException if the partition name is malformed
 * @throws AlreadyExistsException if the partition already exists
 * @throws MetaException on metastore-side failure
 * @throws TException on Thrift transport failure
 */
public Partition appendPartitionByName(String dbName, String tableName, String partName)
    throws InvalidObjectException, AlreadyExistsException, MetaException, TException {
  // No environment context for this call.
  return appendPartitionByName(dbName, tableName, partName, null);
}
/**
 * Requests a compaction of a table or partition.
 *
 * @param dbname database containing the table
 * @param tableName table to compact
 * @param partitionName partition to compact, or null for the whole table
 * @param type the kind of compaction to run
 * @param tblproperties table properties to pass along with the request
 * @throws TException on Thrift transport or metastore failure
 * @deprecated use {@code compact2} instead; this overload discards its result
 */
@Deprecated
@Override
public void compact(String dbname, String tableName, String partitionName, CompactionType type,
    Map<String, String> tblproperties) throws TException {
  // Delegate to compact2; any result it produces is intentionally ignored here.
  compact2(dbname, tableName, partitionName, type, tblproperties);
}
/**
 * Allocates a write ID for a single transaction by running a one-element
 * batch allocation and unwrapping its result.
 *
 * @param txnId the transaction needing a write ID
 * @param dbName database containing the table
 * @param tableName table the write ID is allocated for
 * @return the allocated write ID
 * @throws TException on Thrift transport or metastore failure
 */
@Override
public long allocateTableWriteId(long txnId, String dbName, String tableName) throws TException {
  final List<TxnToWriteId> allocated =
      allocateTableWriteIdsBatch(Collections.singletonList(txnId), dbName, tableName);
  // Exactly one txn was submitted, so the answer is the first (only) entry.
  return allocated.get(0).getWriteId();
}
/**
 * Appends a partition identified by its value list, delegating to the
 * context-aware overload with no context.
 *
 * @param db_name database containing the table
 * @param table_name table to append the partition to
 * @param part_vals partition key values
 * @return the appended partition
 * @throws InvalidObjectException if the partition values are malformed
 * @throws AlreadyExistsException if the partition already exists
 * @throws MetaException on metastore-side failure
 * @throws TException on Thrift transport failure
 * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#append_partition(java.lang.String,
 *      java.lang.String, java.util.List)
 */
@Override
public Partition appendPartition(String db_name, String table_name, List<String> part_vals)
    throws TException {
  // No environment context for this call.
  return appendPartition(db_name, table_name, part_vals, null);
}
/**
 * Allocates write IDs during replication, carrying the replication policy and
 * the source cluster's txn-to-writeId mapping in the request.
 *
 * @param dbName database containing the table
 * @param tableName table the write IDs are allocated for
 * @param replPolicy replication policy identifying the source
 * @param srcTxnToWriteIdList txn-to-writeId mappings from the source cluster
 * @return the txn-to-writeId mappings produced by the metastore
 * @throws TException on Thrift transport or metastore failure
 */
@Override
public List<TxnToWriteId> replAllocateTableWriteIdsBatch(String dbName, String tableName,
    String replPolicy, List<TxnToWriteId> srcTxnToWriteIdList) throws TException {
  final AllocateTableWriteIdsRequest request =
      new AllocateTableWriteIdsRequest(dbName, tableName);
  request.setReplPolicy(replPolicy);
  request.setSrcTxnToWriteIdList(srcTxnToWriteIdList);
  return allocateTableWriteIdsBatchIntr(request);
}
@Override protected IMetaStoreClient getClient() throws Exception { // Separate client to create the catalog catalogCapableClient = new HiveMetaStoreClient(conf); catLocation = MetaStoreTestUtils.getTestWarehouseDir(catName); Catalog cat = new CatalogBuilder() .setName(catName) .setLocation(catLocation) .build(); catalogCapableClient.createCatalog(cat); catalogCapableClient.close(); MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CATALOG_DEFAULT, catName); return new HiveMetaStoreClientPreCatalog(conf); }
/**
 * Alters a table, optionally marking the change for cascading via the
 * environment context, then delegates to
 * {@code alter_table_with_environmentContext}.
 *
 * @param defaultDatabaseName database containing the table
 * @param tblName name of the table to alter
 * @param table the new table definition
 * @param cascade whether to set the CASCADE property on the context
 * @throws InvalidOperationException if the alteration is not permitted
 * @throws MetaException on metastore-side failure
 * @throws TException on Thrift transport failure
 */
@Override
public void alter_table(String defaultDatabaseName, String tblName, Table table, boolean cascade)
    throws InvalidOperationException, MetaException, TException {
  final EnvironmentContext context = new EnvironmentContext();
  if (cascade) {
    // Flag the cascade request on the context so the server can honor it.
    context.putToProperties(StatsSetupConst.CASCADE, StatsSetupConst.TRUE);
  }
  // An (possibly empty) context is always passed, matching the original behavior.
  alter_table_with_environmentContext(defaultDatabaseName, tblName, table, context);
}