/**
 * Commits any pending work on the given connection (when not in auto-commit mode)
 * and then closes it. A {@code null} handle is silently ignored.
 *
 * @param db the connection to close; may be {@code null}
 * @throws KettleDatabaseException when the commit or disconnect fails
 */
private void disconnectDb( Database db ) throws KettleDatabaseException {
  if ( db != null ) {
    // Manual-commit connections must be flushed before the disconnect.
    if ( !db.isAutoCommit() ) {
      db.commit( true );
    }
    db.disconnect();
  }
}
/**
 * Works around a MySQL/InnoDB quirk: a plain read in another session is considered
 * an open transaction, so a later lock attempt would conflict with it. Committing
 * here closes that implicit read transaction.
 *
 * @throws KettleDatabaseException when the commit fails
 */
public void closeReadTransaction() throws KettleDatabaseException {
  boolean needsManualCommit = databaseMeta.isMySQLVariant() && !database.isAutoCommit();
  if ( needsManualCommit ) {
    database.commit();
  }
}
/**
 * Disconnects from the repository database: closes the cached lookup prepared
 * statements, every statement cached in {@code sqlMap}, commits pending work
 * (when not auto-committing), marks the repository as disconnected and finally
 * drops the connection and clears the statement cache.
 */
public synchronized void disconnect() {
  try {
    repository.connectionDelegate.closeStepAttributeLookupPreparedStatement();
    repository.connectionDelegate.closeTransAttributeLookupPreparedStatement();
    repository.connectionDelegate.closeLookupJobEntryAttribute();
    for ( String sql : sqlMap.keySet() ) {
      PreparedStatement ps = sqlMap.get( sql );
      try {
        ps.close();
      } catch ( SQLException e ) {
        log.logError( "Error closing prepared statement: " + sql, e );
      }
    }
    if ( !database.isAutoCommit() ) {
      commit();
    }
    repository.setConnected( false );
  } catch ( KettleException dbe ) {
    // BUGFIX: pass the exception along so the stack trace is logged,
    // not just its message (consistent with the ps.close() handler above).
    log.logError( "Error disconnecting from database : " + dbe.getMessage(), dbe );
  } finally {
    // Always release the connection and the statement cache, even on failure.
    database.disconnect();
    sqlMap.clear();
  }
}
if ( !isAutoCommit() ) { if ( useBatchInsert ) { debug = "insertRow add batch"; if ( !isAutoCommit() && ( written % commitsize ) == 0 ) { if ( useBatchInsert ) { isBatchUpdate = true;
try { if ( ps != null ) { if ( !isAutoCommit() ) {
public synchronized void commit() throws KettleException { try { closeJobAttributeInsertPreparedStatement(); closeStepAttributeInsertPreparedStatement(); closeTransAttributeInsertPreparedStatement(); if ( !database.isAutoCommit() ) { database.commit(); } // Also, clear the counters, reducing the risk of collisions! // Counters.getInstance().clear(); } catch ( KettleException dbe ) { throw new KettleException( "Unable to commit repository connection", dbe ); } }
try { if ( ps != null ) { if ( !isAutoCommit() ) {
/**
 * Releases the step's database connection: commits on a clean run, rolls back
 * when errors occurred (manual-commit connections only), then disconnects.
 */
public void dispose( StepMetaInterface smi, StepDataInterface sdi ) {
  meta = (CombinationLookupMeta) smi;
  data = (CombinationLookupData) sdi;
  if ( data.db != null ) {
    try {
      if ( !data.db.isAutoCommit() ) {
        boolean cleanRun = getErrors() == 0;
        if ( cleanRun ) {
          data.db.commit();
        } else {
          data.db.rollback();
        }
      }
    } catch ( KettleDatabaseException e ) {
      logError( BaseMessages.getString( PKG, "CombinationLookup.Log.UnexpectedError" ) + " : " + e.toString() );
    } finally {
      // The connection is released no matter how the commit/rollback went.
      data.db.disconnect();
    }
  }
  super.dispose( smi, sdi );
}
/**
 * Releases the dimension-lookup connection: commits on a clean run, rolls back
 * when errors occurred (manual-commit connections only), then disconnects.
 */
@Override
public void dispose( StepMetaInterface smi, StepDataInterface sdi ) {
  meta = (DimensionLookupMeta) smi;
  data = (DimensionLookupData) sdi;
  if ( data.db != null ) {
    try {
      if ( !data.db.isAutoCommit() ) {
        boolean cleanRun = getErrors() == 0;
        if ( cleanRun ) {
          data.db.commit();
        } else {
          data.db.rollback();
        }
      }
    } catch ( KettleDatabaseException e ) {
      logError( BaseMessages.getString( PKG, "DimensionLookup.Log.ErrorOccurredInProcessing" ) + e.getMessage() );
    } finally {
      // The connection is released no matter how the commit/rollback went.
      data.db.disconnect();
    }
  }
  super.dispose( smi, sdi );
}
}
/**
 * Finishes the Delete step: commits on a clean run or rolls back on errors
 * (manual-commit connections only), closes the update statement and disconnects.
 * A failure here is logged and counted as a step error.
 */
public void dispose( StepMetaInterface smi, StepDataInterface sdi ) {
  meta = (DeleteMeta) smi;
  data = (DeleteData) sdi;
  if ( data.db != null ) {
    try {
      if ( !data.db.isAutoCommit() ) {
        boolean cleanRun = getErrors() == 0;
        if ( cleanRun ) {
          data.db.commit();
        } else {
          data.db.rollback();
        }
      }
      data.db.closeUpdate();
    } catch ( KettleDatabaseException e ) {
      logError( BaseMessages.getString( PKG, "Delete.Log.UnableToCommitUpdateConnection" ) + data.db + "] :"
        + e.toString() );
      setErrors( 1 );
    } finally {
      // The connection is released no matter how the commit/rollback went.
      data.db.disconnect();
    }
  }
  super.dispose( smi, sdi );
}
/**
 * Finishes the Insert/Update step: commits on a clean run or rolls back on
 * errors (manual-commit connections only), closes the update and insert
 * statements and disconnects. A failure here is logged and counted as an error.
 */
public void dispose( StepMetaInterface smi, StepDataInterface sdi ) {
  meta = (InsertUpdateMeta) smi;
  data = (InsertUpdateData) sdi;
  if ( data.db != null ) {
    try {
      if ( !data.db.isAutoCommit() ) {
        boolean cleanRun = getErrors() == 0;
        if ( cleanRun ) {
          data.db.commit();
        } else {
          data.db.rollback();
        }
      }
      data.db.closeUpdate();
      data.db.closeInsert();
    } catch ( KettleDatabaseException e ) {
      logError( BaseMessages.getString( PKG, "InsertUpdate.Log.UnableToCommitConnection" ) + e.toString() );
      setErrors( 1 );
    } finally {
      // The connection is released no matter how the commit/rollback went.
      data.db.disconnect();
    }
  }
  super.dispose( smi, sdi );
}
if ( getConnection().getAutoCommit() != isAutoCommit() ) { setAutoCommit( isAutoCommit() );
/**
 * Finishes the Update step: on a clean run flushes any remaining batch rows and
 * commits via {@code emptyAndCommit}; on errors rolls back (manual-commit
 * connections only). Both prepared statements are closed before disconnecting.
 */
public void dispose( StepMetaInterface smi, StepDataInterface sdi ) {
  meta = (UpdateMeta) smi;
  data = (UpdateData) sdi;
  if ( data.db != null ) {
    try {
      if ( !data.db.isAutoCommit() ) {
        boolean cleanRun = getErrors() == 0;
        if ( cleanRun ) {
          // Drains the pending batch (when batch updates are enabled) and commits.
          data.db.emptyAndCommit( data.prepStatementUpdate, meta.useBatchUpdate() );
        } else {
          data.db.rollback();
        }
      }
      data.db.closePreparedStatement( data.prepStatementUpdate );
      data.db.closePreparedStatement( data.prepStatementLookup );
    } catch ( KettleDatabaseException e ) {
      logError( BaseMessages.getString( PKG, "Update.Log.UnableToCommitUpdateConnection" ) + data.db + "] :"
        + e.toString() );
      setErrors( 1 );
    } finally {
      // The connection is released no matter how the commit/rollback went.
      data.db.disconnect();
    }
  }
  super.dispose( smi, sdi );
}
/**
 * Finishes the ExecSQLRow step: commits on a clean run or rolls back on errors
 * (manual-commit connections only), then disconnects.
 */
@Override
public void dispose( StepMetaInterface smi, StepDataInterface sdi ) {
  meta = (ExecSQLRowMeta) smi;
  data = (ExecSQLRowData) sdi;
  if ( log.isBasic() ) {
    logBasic( BaseMessages.getString( PKG, "ExecSQLRow.Log.FinishingReadingQuery" ) );
  }
  if ( data.db != null ) {
    try {
      if ( !data.db.isAutoCommit() ) {
        boolean cleanRun = getErrors() == 0;
        if ( cleanRun ) {
          data.db.commit();
        } else {
          data.db.rollback();
        }
      }
    } catch ( KettleDatabaseException e ) {
      // NOTE(review): this message key looks copy-pasted from the Update step —
      // confirm whether an ExecSQLRow-specific key exists before changing it.
      logError( BaseMessages.getString( PKG, "Update.Log.UnableToCommitUpdateConnection" ) + data.db + "] :"
        + e.toString() );
      setErrors( 1 );
    } finally {
      // The connection is released no matter how the commit/rollback went.
      data.db.disconnect();
    }
  }
  super.dispose( smi, sdi );
}
/**
 * Writes a status record to the Job Log table and, when the job has ended,
 * purges old log records. Commits the log write (manual-commit connections)
 * and disconnects in all cases.
 *
 * @param jobLogTable the log table definition to write to
 * @param status      the job status being recorded
 * @throws KettleJobException      when the log record cannot be written
 * @throws KettleDatabaseException when connecting or committing fails
 */
protected void writeLogTableInformation( JobLogTable jobLogTable, LogStatus status ) throws KettleJobException,
  KettleDatabaseException {
  // Old records are only cleaned once the job is finished.
  boolean cleanLogRecords = status.equals( LogStatus.END );
  String tableName = jobLogTable.getActualTableName();
  DatabaseMeta logcon = jobLogTable.getDatabaseMeta();
  Database ldb = createDataBase( logcon );
  ldb.shareVariablesWith( this );
  try {
    ldb.connect();
    ldb.setCommit( logCommitSize );
    ldb.writeLogRecord( jobLogTable, status, this, null );
    if ( cleanLogRecords ) {
      ldb.cleanupLogRecords( jobLogTable );
    }
  } catch ( KettleDatabaseException dbe ) {
    addErrors( 1 );
    throw new KettleJobException( "Unable to end processing by writing log record to table " + tableName, dbe );
  } finally {
    if ( !ldb.isAutoCommit() ) {
      ldb.commitLog( true, jobLogTable );
    }
    ldb.disconnect();
  }
}
/**
 * Writes a START log record for every job entry copy to the job entry log table
 * and cleans up old log records. Commits the log write (manual-commit
 * connections) and disconnects in all cases.
 *
 * @throws KettleException when writing the log records fails
 */
protected void writeJobEntryLogInformation() throws KettleException {
  Database db = null;
  JobEntryLogTable jobEntryLogTable = getJobMeta().getJobEntryLogTable();
  try {
    db = createDataBase( jobEntryLogTable.getDatabaseMeta() );
    db.shareVariablesWith( this );
    db.connect();
    db.setCommit( logCommitSize );
    for ( JobEntryCopy copy : getJobMeta().getJobCopies() ) {
      db.writeLogRecord( jobEntryLogTable, LogStatus.START, copy, this );
    }
    db.cleanupLogRecords( jobEntryLogTable );
  } catch ( Exception e ) {
    throw new KettleException( BaseMessages.getString( PKG,
      "Job.Exception.UnableToJobEntryInformationToLogTable" ), e );
  } finally {
    // BUGFIX: db stays null when createDataBase() throws; the unguarded finally
    // then raised a NullPointerException that masked the original failure.
    if ( db != null ) {
      if ( !db.isAutoCommit() ) {
        db.commitLog( true, jobEntryLogTable );
      }
      db.disconnect();
    }
  }
}
"Trans.Exception.UnableToWriteLogChannelInformationToLogTable" ), e ); } finally { if ( !db.isAutoCommit() ) { db.commit( true );
if ( !ldb.isAutoCommit() ) { ldb.commitLog( true, transMeta.getTransLogTable() );
data.result = data.db.execStatements( data.sql ); if ( !data.db.isAutoCommit() ) { data.db.commit();
if ( !data.db.isAutoCommit() ) { if ( meta.getCommitSize() == 1 ) { data.db.commit();