protected void checkConnection() throws KettleDatabaseException { // check connection // connect and disconnect Database dbchecked = null; try { dbchecked = new Database( this, connection ); dbchecked.shareVariablesWith( this ); dbchecked.connect( parentJob.getTransactionId(), null ); } finally { if ( dbchecked != null ) { dbchecked.disconnect(); } } }
/**
 * Construct a new Database Connection
 *
 * @param parentObject the object initiating this connection, used for log channel parenting
 *                     and (when it is a VariableSpace) variable inheritance
 * @param databaseMeta The Database Connection Info to construct the connection with.
 */
public Database( LoggingObjectInterface parentObject, DatabaseMeta databaseMeta ) {
  this.parentLoggingObject = parentObject;
  this.databaseMeta = databaseMeta;

  // Start from a clean slate: nothing prepared, nothing read or written yet.
  pstmt = null;
  rowMeta = null;
  dbmd = null;
  rowlimit = 0;
  written = 0;
  opened = 0;
  copy = 0;

  // Inherit variables from the connection metadata and, when available, from the parent.
  shareVariablesWith( databaseMeta );
  if ( parentObject instanceof VariableSpace ) {
    shareVariablesWith( (VariableSpace) parentObject );
  }

  // Tie the log channel to the parent so log lines trace back to the initiator.
  log = new LogChannel( this, parentObject );
  this.containerObjectId = log.getContainerObjectId();
  this.logLevel = log.getLogLevel();
  if ( parentObject != null ) {
    log.setGatheringMetrics( parentObject.isGatheringMetrics() );
  }

  if ( log.isDetailed() ) {
    log.logDetailed( "New database connection defined" );
  }
}
/**
 * Construct a new Database Connection
 *
 * @param databaseMeta The Database Connection Info to construct the connection with.
 * @deprecated Please specify the parent object so that we can see which object is initiating a
 *             database connection
 */
@Deprecated
public Database( DatabaseMeta databaseMeta ) {
  this.parentLoggingObject = null;
  this.databaseMeta = databaseMeta;

  shareVariablesWith( databaseMeta );

  // Without a parent object we cannot tell who opened the connection, nor which
  // log level to attach, so the default log channel is used. Hence @deprecated.
  log = new LogChannel( this );
  logLevel = log.getLogLevel();
  containerObjectId = log.getContainerObjectId();

  // Reset all per-connection state before any work happens.
  pstmt = null;
  rowMeta = null;
  dbmd = null;
  rowlimit = 0;
  written = 0;
  opened = 0;
  copy = 0;

  if ( log.isDetailed() ) {
    log.logDetailed( "New database connection defined" );
  }
}
/**
 * {@inheritDoc}
 *
 * Looks up the field metadata of the target table, unless a control file is used.
 * The database connection is always released, even when the lookup fails.
 *
 * @param space variable space used to substitute the target table name
 * @return the table's field metadata, or {@code null} when a control file is used
 * @throws KettleException if the target table cannot be found
 * @see org.pentaho.di.trans.step.BaseStepMeta#getRequiredFields(org.pentaho.di.core.variables.VariableSpace)
 */
@Override
public RowMetaInterface getRequiredFields( final VariableSpace space ) throws KettleException {
  if ( !this.useControlFile.getValue() ) {
    final Database database = connectToDatabase();
    database.shareVariablesWith( space );
    RowMetaInterface fields;
    try {
      fields = database.getTableFieldsMeta( StringUtils.EMPTY,
        space.environmentSubstitute( this.targetTable.getValue() ) );
    } finally {
      // Bug fix: disconnect was previously skipped when getTableFieldsMeta() threw,
      // leaking the connection. Always release it.
      database.disconnect();
    }
    if ( fields == null ) {
      throw new KettleException( MESSAGES.getString( "TeraFastMeta.Exception.TableNotFound" ) );
    }
    return fields;
  }
  return null;
}
private void connectDatabase( Database database ) throws KettleDatabaseException { database.shareVariablesWith( this ); if ( getTransMeta().isUsingUniqueConnections() ) { synchronized ( getTrans() ) { database.connect( getTrans().getTransactionId(), getPartitionID() ); } } else { database.connect( getPartitionID() ); } database.setCommit( 100 ); // we never get a commit, but it just turns off auto-commit. if ( log.isDetailed() ) { logDetailed( BaseMessages.getString( PKG, "DatabaseLookup.Log.ConnectedToDatabase" ) ); } } }
/**
 * Writes step information to a step logging table (if one has been configured).
 *
 * @throws KettleException if any errors occur during logging
 */
protected void writeStepLogInformation() throws KettleException {
  Database logDb = null;
  StepLogTable stepLogTable = getTransMeta().getStepLogTable();
  try {
    logDb = createDataBase( stepLogTable.getDatabaseMeta() );
    logDb.shareVariablesWith( this );
    logDb.connect();
    logDb.setCommit( logCommitSize );

    // Record a START entry for every step of the transformation.
    for ( StepMetaDataCombi combi : getSteps() ) {
      logDb.writeLogRecord( stepLogTable, LogStatus.START, combi, null );
    }

    logDb.cleanupLogRecords( stepLogTable );
  } catch ( Exception e ) {
    throw new KettleException(
      BaseMessages.getString( PKG, "Trans.Exception.UnableToWriteStepInformationToLogTable" ), e );
  } finally {
    // Null-safe helper: handles the case where the connection was never created.
    disconnectDb( logDb );
  }
}
/**
 * Writes information to Job Log table. Cleans old records, in case job is finished.
 *
 * @param jobLogTable the job log table to write to
 * @param status      the status being logged; END additionally triggers cleanup of old records
 * @throws KettleJobException      if the log record cannot be written
 * @throws KettleDatabaseException if connecting or disconnecting fails
 */
protected void writeLogTableInformation( JobLogTable jobLogTable, LogStatus status )
  throws KettleJobException, KettleDatabaseException {
  boolean cleanLogRecords = status.equals( LogStatus.END );
  String tableName = jobLogTable.getActualTableName();
  DatabaseMeta logcon = jobLogTable.getDatabaseMeta();

  Database ldb = createDataBase( logcon );
  ldb.shareVariablesWith( this );
  try {
    ldb.connect();
    ldb.setCommit( logCommitSize );
    ldb.writeLogRecord( jobLogTable, status, this, null );

    if ( cleanLogRecords ) {
      ldb.cleanupLogRecords( jobLogTable );
    }
  } catch ( KettleDatabaseException dbe ) {
    addErrors( 1 );
    throw new KettleJobException( "Unable to end processing by writing log record to table " + tableName, dbe );
  } finally {
    try {
      if ( !ldb.isAutoCommit() ) {
        ldb.commitLog( true, jobLogTable );
      }
    } finally {
      // Bug fix: if commitLog() throws, disconnect() was previously skipped and the
      // connection leaked. Always disconnect, letting any commit exception propagate.
      ldb.disconnect();
    }
  }
}
@Override public SQLStatement getSQLStatements( TransMeta transMeta, StepMeta stepMeta, RowMetaInterface prev, Repository repository, IMetaStore metaStore ) { SQLStatement retval = new SQLStatement( stepMeta.getName(), database, null ); // default: nothing to do! if ( useDatabase ) { // Otherwise, don't bother! if ( database != null ) { Database db = new Database( loggingObject, database ); db.shareVariablesWith( transMeta ); try { db.connect(); if ( !db.checkSequenceExists( schemaName, sequenceName ) ) { String cr_table = db.getCreateSequenceStatement( sequenceName, startAt, incrementBy, maxValue, true ); retval.setSQL( cr_table ); } else { retval.setSQL( null ); // Empty string means: nothing to do: set it to null... } } catch ( KettleException e ) { retval.setError( BaseMessages.getString( PKG, "AddSequenceMeta.ErrorMessage.UnableToConnectDB" ) + Const.CR + e.getMessage() ); } finally { db.disconnect(); } } else { retval.setError( BaseMessages.getString( PKG, "AddSequenceMeta.ErrorMessage.NoConnectionDefined" ) ); } } return retval; }
if ( !Utils.isEmpty( tableName ) ) { Database db = new Database( loggingObject, databaseMeta ); db.shareVariablesWith( transMeta ); try { db.connect();
if ( !Utils.isEmpty( tablename ) ) { Database db = new Database( loggingObject, databaseMeta ); db.shareVariablesWith( transMeta ); try { db.connect();
db.shareVariablesWith( transMeta ); try { db.connect();
/**
 * Write job entry log information.
 *
 * @throws KettleException the kettle exception
 */
protected void writeJobEntryLogInformation() throws KettleException {
  Database db = null;
  JobEntryLogTable jobEntryLogTable = getJobMeta().getJobEntryLogTable();
  try {
    db = createDataBase( jobEntryLogTable.getDatabaseMeta() );
    db.shareVariablesWith( this );
    db.connect();
    db.setCommit( logCommitSize );

    // Record a START entry for every job entry copy.
    for ( JobEntryCopy copy : getJobMeta().getJobCopies() ) {
      db.writeLogRecord( jobEntryLogTable, LogStatus.START, copy, this );
    }

    db.cleanupLogRecords( jobEntryLogTable );
  } catch ( Exception e ) {
    throw new KettleException(
      BaseMessages.getString( PKG, "Job.Exception.UnableToJobEntryInformationToLogTable" ), e );
  } finally {
    // Bug fix: db stays null when createDataBase() (or the log-table lookup) throws;
    // the unconditional db.isAutoCommit() call then raised an NPE from the finally
    // block that masked the original exception. Guard before touching db.
    if ( db != null ) {
      if ( !db.isAutoCommit() ) {
        db.commitLog( true, jobEntryLogTable );
      }
      db.disconnect();
    }
  }
}
data.db.shareVariablesWith( this ); try { if ( getTransMeta().isUsingUniqueConnections() ) {
if ( databaseMeta != null ) { Database database = new Database( loggingObject, databaseMeta ); database.shareVariablesWith( jobMeta ); try { database.connect();
if ( databaseMeta != null ) { Database database = new Database( loggingObject, databaseMeta ); database.shareVariablesWith( transMeta ); try { database.connect();
/**
 * Initializes the step: verifies a connection is defined, opens the database
 * (honoring the unique-connections setting) and configures the commit size.
 *
 * @param smi step meta interface (cast to InsertUpdateMeta)
 * @param sdi step data interface (cast to InsertUpdateData)
 * @return true when initialization succeeded, false otherwise
 */
public boolean init( StepMetaInterface smi, StepDataInterface sdi ) {
  meta = (InsertUpdateMeta) smi;
  data = (InsertUpdateData) sdi;

  if ( super.init( smi, sdi ) ) {
    try {
      if ( meta.getDatabaseMeta() == null ) {
        logError( BaseMessages.getString( PKG, "InsertUpdate.Init.ConnectionMissing", getStepname() ) );
        return false;
      }
      data.db = new Database( this, meta.getDatabaseMeta() );
      data.db.shareVariablesWith( this );
      if ( getTransMeta().isUsingUniqueConnections() ) {
        // Single transaction-wide connection: serialize on the transformation while connecting.
        synchronized ( getTrans() ) {
          data.db.connect( getTrans().getTransactionId(), getPartitionID() );
        }
      } else {
        data.db.connect( getPartitionID() );
      }
      data.db.setCommit( meta.getCommitSize( this ) );
      return true;
    } catch ( KettleException ke ) {
      // Bug fix: pass the throwable so the stack trace reaches the log instead of
      // only the message text.
      logError( BaseMessages.getString( PKG, "InsertUpdate.Log.ErrorOccurredDuringStepInitialize" )
        + ke.getMessage(), ke );
    }
  }
  return false;
}
data.db.shareVariablesWith( this ); try { if ( getTransMeta().isUsingUniqueConnections() ) {
data.db.shareVariablesWith( this ); try { if ( getTransMeta().isUsingUniqueConnections() ) {
data.db.shareVariablesWith( this ); try { if ( getTransMeta().isUsingUniqueConnections() ) {
data.db.shareVariablesWith( this ); if ( !Utils.isEmpty( meta.getSchemaname() ) ) { data.realSchemaname = environmentSubstitute( meta.getSchemaname() );