/**
 * Collects every table referenced by the read entities in {@code inputs}
 * that is NOT an ACID (transactional) table.
 *
 * @return the non-transactional tables among the read inputs
 */
private List<Table> getNonTransactionalTables() {
  List<Table> nonAcidTables = tablesFromReadEntities(inputs)
      .stream()
      .filter(candidate -> !AcidUtils.isTransactionalTable(candidate))
      .collect(Collectors.toList());
  return nonAcidTables;
}
/**
 * A table is "full ACID" when it is transactional but not restricted to
 * insert-only operations.
 *
 * @param params table properties to inspect
 * @return true iff the properties describe a full ACID table
 */
public static boolean isFullAcidTable(Map<String, String> params) {
  if (!isTransactionalTable(params)) {
    return false;
  }
  return !isInsertOnlyTable(params);
}
/**
 * @return whether the table backing this entity is an ACID (transactional)
 *     table, as determined by {@link AcidUtils#isTransactionalTable}
 */
public final boolean isTransactionalTable() { return AcidUtils.isTransactionalTable(getTable()); }
/**
 * Null-safe overload for the ql metadata wrapper: unwraps the underlying
 * Thrift table object and delegates to the matching overload.
 *
 * @param table the ql-level table, may be null
 * @return true iff the table is transactional
 */
public static boolean isTransactionalTable(Table table) {
  return isTransactionalTable(table != null ? table.getTTable() : null);
}
/**
 * Checks the transactional flag on a table that is still being created.
 *
 * @param table the CREATE TABLE descriptor, may be null
 * @return true iff the descriptor's properties mark the table transactional
 */
public static boolean isTransactionalTable(CreateTableDesc table) {
  if (table != null) {
    Map<String, String> props = table.getTblProps();
    if (props != null) {
      return isTransactionalTable(props);
    }
  }
  return false;
}
/**
 * Should produce the same result as
 * {@link org.apache.hadoop.hive.metastore.txn.TxnUtils#isAcidTable(org.apache.hadoop.hive.metastore.api.Table)}
 */
public static boolean isFullAcidTable(org.apache.hadoop.hive.metastore.api.Table table) {
  if (!isTransactionalTable(table)) {
    return false;
  }
  // Transactional but insert-only (MM) tables are not "full" ACID.
  return !isInsertOnlyTable(table.getParameters());
}
private boolean areEventsForDmlNeeded(Table tbl, Partition oldPart) { // For Acid IUD, add partition is a meta data only operation. So need to add the new files added // information into the TXN_WRITE_NOTIFICATION_LOG table. return conf.getBoolVar(ConfVars.FIRE_EVENTS_FOR_DML) && !tbl.isTemporary() && ((null != oldPart) || AcidUtils.isTransactionalTable(tbl)); }
/**
 * Builds a descriptor for a TRUNCATE TABLE operation.
 *
 * @param tableName name as given in the statement
 * @param partSpec partition specification, or null for the whole table
 * @param replicationSpec replication context, if any
 * @param table resolved table metadata; may be null, in which case the raw
 *     {@code tableName} is used as the qualified name
 */
public TruncateTableDesc(String tableName, Map<String, String> partSpec,
    ReplicationSpec replicationSpec, Table table) {
  this.tableName = tableName;
  this.partSpec = partSpec;
  this.replicationSpec = replicationSpec;
  this.isTransactional = AcidUtils.isTransactionalTable(table);
  if (table == null) {
    this.fullTableName = tableName;
  } else {
    this.fullTableName = Warehouse.getQualifiedName(table.getTTable());
  }
}
/**
 * Computes the write/lock type needed for an ALTER TABLE operation.
 * A conversion from non-ACID to transactional gets an exclusive DDL lock;
 * every other operation defers to WriteEntity's per-op default.
 *
 * @param tab the table being altered
 * @param desc the ALTER TABLE descriptor (its props may carry the transactional flag)
 * @param op the specific ALTER TABLE operation type
 */
private WriteType determineAlterTableWriteType(Table tab, AlterTableDesc desc, AlterTableTypes op) {
  // Detect whether this ALTER sets the 'transactional' table property to true.
  boolean convertingToAcid = false;
  if(desc != null && desc.getProps() != null && Boolean.parseBoolean(desc.getProps().get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL))) {
    convertingToAcid = true;
  }
  if(!AcidUtils.isTransactionalTable(tab) && convertingToAcid) {
    //non-acid to transactional conversion (property itself) must be mutexed to prevent concurrent writes.
    // See HIVE-16688 for use cases.
    return WriteType.DDL_EXCLUSIVE;
  }
  return WriteEntity.determineAlterTableWriteType(op);
}

private void addInputsOutputsAlterTable(String tableName, Map<String, String> partSpec,
/**
 * @param table the table to inspect
 * @return true iff {@code table} is transactional and configured as
 *     insert-only (i.e. not a full ACID table)
 */
public static boolean isInsertOnlyTable(Table table) {
  if (!isTransactionalTable(table)) {
    return false;
  }
  return getAcidOperationalProperties(table).isInsertOnly();
}
/**
 * Creates a table-scan descriptor for the given alias and table metadata.
 * Caches db/table names and, for transactional tables, the ACID operational
 * properties, so they are available without re-resolving the metadata.
 *
 * @param alias the alias used for this scan
 * @param vcs virtual columns exposed by the scan
 * @param tblMetadata resolved table metadata; may be null
 */
public TableScanDesc(final String alias, List<VirtualColumn> vcs, Table tblMetadata) {
  this.alias = alias;
  this.virtualCols = vcs;
  this.tableMetadata = tblMetadata;
  if (tblMetadata != null) {
    dbName = tblMetadata.getDbName();
    tableName = tblMetadata.getTableName();
  }
  // NOTE: the field name carries a historical typo ("Transcational");
  // kept as-is because it is declared outside this constructor.
  isTranscationalTable = AcidUtils.isTransactionalTable(tblMetadata);
  if (isTranscationalTable) {
    acidOperationalProperties = AcidUtils.getAcidOperationalProperties(tblMetadata);
  }
}
private boolean resetStatisticsProps(Table table) { if (hasFollowingStatsTask()) { // If there's a follow-on stats task then the stats will be correct after load, so don't // need to reset the statistics. return false; } if (!work.getIsInReplicationScope()) { // If the load is not happening during replication and there is not follow-on stats // task, stats will be inaccurate after load and so need to be reset. return true; } // If we are loading a table during replication, the stats will also be replicated // and hence accurate if it's a non-transactional table. For transactional table we // do not replicate stats yet. return AcidUtils.isTransactionalTable(table.getParameters()); }
@Override public boolean mayNeedWriteId() { switch (getOp()) { case ADDPROPS: { return isExplicitStatsUpdate || AcidUtils.isToInsertOnlyTable(null, getProps()) || (AcidUtils.isTransactionalTable(getProps()) && !isFullAcidConversion); } case DROPPROPS: return isExplicitStatsUpdate; // The check for the following ones is performed before setting AlterTableDesc into the acid field. // These need write ID and stuff because they invalidate column stats. case RENAMECOLUMN: case RENAME: case REPLACECOLS: case ADDCOLS: case ALTERLOCATION: case UPDATECOLUMNS: return true; // RENAMEPARTITION is handled in RenamePartitionDesc default: return false; } }
@Override public void handle(Context withinContext) throws Exception { LOG.info("Processing#{} UpdateTableColumnStat message : {}", fromEventId(), eventMessageAsJSON); Table qlMdTable = new Table(eventMessage.getTableObject()); if (!Utils.shouldReplicate(withinContext.replicationSpec, qlMdTable, withinContext.hiveConf)) { return; } // Statistics without data doesn't make sense. if (withinContext.replicationSpec.isMetadataOnly()) { return; } // For now we do not replicate the statistics for transactional tables. if (AcidUtils.isTransactionalTable(qlMdTable)) { return; } DumpMetaData dmd = withinContext.createDmd(this); dmd.setPayload(eventMessageAsJSON); dmd.write(); }
/**
 * Persists the column statistics carried by this task to the metastore,
 * skipping the update when the target table became transactional during
 * replication (stats replication is not supported for transactional tables).
 *
 * @param db the Hive handle used to write the statistics
 * @return always 0 (success), whether the stats were written or skipped
 */
private int persistColumnStats(Hive db) throws HiveException, MetaException, IOException {
  ColumnStatistics colStats = constructColumnStatsFromInput();
  ColumnStatisticsDesc colStatsDesc = colStats.getStatsDesc();
  // We do not support stats replication for a transactional table yet. If we are converting
  // a non-transactional table to a transactional table during replication, we might get
  // column statistics but we shouldn't update those.
  // NOTE(review): the table is looked up via getHive() while the write below uses the
  // passed-in db handle — presumably both resolve to the same Hive instance; confirm.
  if (work.getColStats() != null && AcidUtils.isTransactionalTable(getHive().getTable(colStatsDesc.getDbName(), colStatsDesc.getTableName()))) {
    LOG.debug("Skipped updating column stats for table " + TableName.getDbTable(colStatsDesc.getDbName(), colStatsDesc.getTableName()) + " because it is converted to a transactional table during replication.");
    return 0;
  }
  SetPartitionsStatsRequest request = new SetPartitionsStatsRequest(Collections.singletonList(colStats));
  db.setPartitionColumnStatistics(request);
  return 0;
}
private static Table createNewTableMetadataObject(ImportTableDesc tblDesc, boolean isRepl) throws SemanticException { Table newTable = new Table(tblDesc.getDatabaseName(), tblDesc.getTableName()); //so that we know the type of table we are creating: acid/MM to match what was exported newTable.setParameters(tblDesc.getTblProps()); if(tblDesc.isExternal() && AcidUtils.isTransactionalTable(newTable)) { if (isRepl) { throw new SemanticException("External tables may not be transactional: " + Warehouse.getQualifiedName(tblDesc.getDatabaseName(), tblDesc.getTableName())); } else { throw new AssertionError("Internal error: transactional properties not set properly" + tblDesc.getTblProps()); } } return newTable; }
/**
 * Analyzes ALTER TABLE ... UPDATE COLUMNS: builds the DDL descriptor,
 * flags it for ACID bookkeeping when the table is transactional, and queues
 * the DDL task.
 *
 * @param ast the parsed statement subtree
 * @param tableName the table being altered
 * @param partSpec partition specification, or null
 */
private void analyzeAlterTableUpdateColumns(ASTNode ast, String tableName,
    HashMap<String, String> partSpec) throws SemanticException {
  boolean isCascade = ast.getFirstChildWithType(HiveParser.TOK_CASCADE) != null;
  AlterTableDesc alterTblDesc = new AlterTableDesc(AlterTableTypes.UPDATECOLUMNS);
  alterTblDesc.setOldName(tableName);
  alterTblDesc.setIsCascade(isCascade);
  alterTblDesc.setPartSpec(partSpec);
  // Transactional tables need the ACID DDL descriptor set for write-id handling.
  Table targetTable = getTable(tableName);
  if (AcidUtils.isTransactionalTable(targetTable)) {
    setAcidDdlDesc(alterTblDesc);
  }
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc), conf));
}
/**
 * Analyzes ALTER TABLE ... RENAME: builds the rename descriptor, flags it for
 * ACID bookkeeping when the source table is transactional, registers the
 * inputs/outputs, and queues the DDL task.
 *
 * @param source qualified name parts of the source table
 * @param ast the parsed statement subtree (child 0 is the target name)
 * @param expectView true when renaming a view
 */
private void analyzeAlterTableRename(String[] source, ASTNode ast, boolean expectView)
    throws SemanticException {
  String sourceName = getDotName(source);
  String[] target = getQualifiedTableName((ASTNode) ast.getChild(0));
  String targetName = getDotName(target);
  AlterTableDesc alterTblDesc = new AlterTableDesc(sourceName, targetName, expectView, null);
  Table sourceTable = getTable(sourceName, true);
  if (AcidUtils.isTransactionalTable(sourceTable)) {
    setAcidDdlDesc(alterTblDesc);
  }
  addInputsOutputsAlterTable(sourceName, null, alterTblDesc);
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc)));
}
/**
 * Analyzes ALTER TABLE ... ADD/REPLACE COLUMNS: builds the column-change
 * descriptor, flags it for ACID bookkeeping when the table is transactional,
 * registers the inputs/outputs, and queues the DDL task.
 *
 * @param qualified qualified name parts of the table
 * @param ast the parsed statement subtree (child 0 holds the column list)
 * @param partSpec partition specification, or null
 * @param alterType whether columns are added or replaced
 */
private void analyzeAlterTableModifyCols(String[] qualified, ASTNode ast,
    HashMap<String, String> partSpec, AlterTableTypes alterType) throws SemanticException {
  String tblName = getDotName(qualified);
  List<FieldSchema> newCols = getColumns((ASTNode) ast.getChild(0));
  boolean isCascade = ast.getFirstChildWithType(HiveParser.TOK_CASCADE) != null;
  AlterTableDesc alterTblDesc =
      new AlterTableDesc(tblName, partSpec, newCols, alterType, isCascade);
  Table targetTable = getTable(tblName, true);
  if (AcidUtils.isTransactionalTable(targetTable)) {
    setAcidDdlDesc(alterTblDesc);
  }
  addInputsOutputsAlterTable(tblName, partSpec, alterTblDesc);
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc)));
}
public int persistColumnStats(Hive db, Table tbl) throws HiveException, MetaException, IOException { // Construct a column statistics object from the result List<ColumnStatistics> colStats = constructColumnStatsFromPackedRows(tbl); // Persist the column statistics object to the metastore // Note, this function is shared for both table and partition column stats. if (colStats.isEmpty()) { return 0; } SetPartitionsStatsRequest request = new SetPartitionsStatsRequest(colStats); request.setNeedMerge(colStatDesc.isNeedMerge()); HiveTxnManager txnMgr = AcidUtils.isTransactionalTable(tbl) ? SessionState.get().getTxnMgr() : null; if (txnMgr != null) { request.setValidWriteIdList(AcidUtils.getTableValidWriteIdList(conf, AcidUtils.getFullTableName(tbl.getDbName(), tbl.getTableName())).toString()); request.setWriteId(txnMgr.getAllocatedTableWriteId(tbl.getDbName(), tbl.getTableName())); } db.setPartitionColumnStatistics(request); return 0; }