private void getJobLogTableOptions( JobLogTable jobLogTable ) {

  // The connection...
  //
  jobLogTable.setConnectionName( wLogconnection.getText() );
  jobLogTable.setSchemaName( wLogSchema.getText() );
  jobLogTable.setTableName( wLogTable.getText() );
  jobLogTable.setLogInterval( wLogInterval.getText() );
  jobLogTable.setLogSizeLimit( wLogSizeLimit.getText() );
  jobLogTable.setTimeoutInDays( wLogTimeout.getText() );

  for ( int i = 0; i < jobLogTable.getFields().size(); i++ ) {
    TableItem item = wOptionFields.table.getItem( i );
    LogTableField field = jobLogTable.getFields().get( i );
    field.setEnabled( item.getChecked() );
    field.setFieldName( item.getText( 1 ) );
  }
}
JobLogTable jobLogTable = jobMeta.getJobLogTable();
if ( !jobLogTable.isDefined() ) {
  feedback.add( new ImportValidationFeedback(
    this, ImportValidationResultType.ERROR, "The logging table is not defined" ) );
} else {
  if ( !Utils.isEmpty( schemaName ) ) {
    if ( schemaName.equals( jobLogTable.getSchemaName() ) ) {
      feedback.add( new ImportValidationFeedback(
        this, ImportValidationResultType.APPROVAL, "The schema name is set to: " + schemaName ) );
      if ( tableName.equals( jobLogTable.getTableName() ) ) {
        feedback.add( new ImportValidationFeedback(
          this, ImportValidationResultType.APPROVAL, "The table name is set to: " + tableName ) );
        if ( connectionName.equals( jobLogTable.getDatabaseMeta().getName() ) ) {
          feedback.add( new ImportValidationFeedback(
            this, ImportValidationResultType.APPROVAL,
            "The database connection used for logging is: " + connectionName ) );
        }
        // ... (the matching ERROR branches of these checks are elided in this excerpt)
      }
    }
  }
}
private JobLogTable getJobLogTableWithAllEqFields( String fieldsValue ) {
  JobLogTable jobLogTable = JobLogTable.getDefault( mockedVariableSpace, mockedHasDbInterface );
  initCommonTableFields( jobLogTable, fieldsValue );
  jobLogTable.setLogSizeLimit( fieldsValue );
  jobLogTable.setLogInterval( fieldsValue );
  return jobLogTable;
}
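// A self-contained variant of the helper above, for reference. It assumes
// Mockito mocks for the variable space and databases interface (as the
// surrounding tests do) and inlines the name setters that the test's
// initCommonTableFields presumably applies. The class and method names are
// hypothetical; import paths are per Kettle 8.x and may differ by version.
import static org.mockito.Mockito.mock;

import org.pentaho.di.core.logging.JobLogTable;
import org.pentaho.di.core.variables.VariableSpace;
import org.pentaho.di.trans.HasDatabasesInterface;

class JobLogTableSketch {
  static JobLogTable buildWithMarker( String marker ) {
    VariableSpace space = mock( VariableSpace.class );
    HasDatabasesInterface databases = mock( HasDatabasesInterface.class );
    JobLogTable table = JobLogTable.getDefault( space, databases );
    table.setConnectionName( marker );
    table.setSchemaName( marker );
    table.setTableName( marker );
    table.setLogInterval( marker );
    table.setLogSizeLimit( marker );
    table.setTimeoutInDays( marker );
    return table;
  }
}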
private void saveJobDetails( DataNode rootNode, JobMeta jobMeta ) throws KettleException {
  rootNode.setProperty( PROP_EXTENDED_DESCRIPTION, jobMeta.getExtendedDescription() );
  rootNode.setProperty( PROP_JOB_VERSION, jobMeta.getJobversion() );
  rootNode.setProperty( PROP_JOB_STATUS, jobMeta.getJobstatus() < 0 ? -1L : jobMeta.getJobstatus() );

  if ( jobMeta.getJobLogTable().getDatabaseMeta() != null ) {
    DataNodeRef ref = new DataNodeRef( jobMeta.getJobLogTable().getDatabaseMeta().getObjectId().getId() );
    rootNode.setProperty( PROP_DATABASE_LOG, ref );
  }
  rootNode.setProperty( PROP_TABLE_NAME_LOG, jobMeta.getJobLogTable().getTableName() );

  rootNode.setProperty( PROP_CREATED_USER, jobMeta.getCreatedUser() );
  rootNode.setProperty( PROP_CREATED_DATE, jobMeta.getCreatedDate() );
  rootNode.setProperty( PROP_MODIFIED_USER, jobMeta.getModifiedUser() );
  rootNode.setProperty( PROP_MODIFIED_DATE, jobMeta.getModifiedDate() );
  rootNode.setProperty( PROP_USE_BATCH_ID, jobMeta.getJobLogTable().isBatchIdUsed() );
  rootNode.setProperty( PROP_PASS_BATCH_ID, jobMeta.isBatchIdPassed() );
  rootNode.setProperty( PROP_USE_LOGFIELD, jobMeta.getJobLogTable().isLogFieldUsed() );
  rootNode.setProperty( PROP_SHARED_FILE, jobMeta.getSharedObjectsFile() );
  rootNode.setProperty( PROP_LOG_SIZE_LIMIT, jobMeta.getJobLogTable().getLogSizeLimit() );

  // Save the logging tables too..
  //
  RepositoryAttributeInterface attributeInterface = new PurRepositoryAttribute( rootNode, jobMeta.getDatabases() );
  for ( LogTableInterface logTable : jobMeta.getLogTables() ) {
    logTable.saveToRepository( attributeInterface );
  }

  // Save the attributes map
  //
  AttributesMapUtil.saveAttributesMap( rootNode, jobMeta );
}
jobMeta.setJobversion( getString( rootNode, PROP_JOB_VERSION ) );
jobMeta.setJobstatus( (int) rootNode.getProperty( PROP_JOB_STATUS ).getLong() );
jobMeta.getJobLogTable().setTableName( getString( rootNode, PROP_TABLE_NAME_LOG ) );

String id = rootNode.getProperty( PROP_DATABASE_LOG ).getRef().getId().toString();
DatabaseMeta conn = DatabaseMeta.findDatabase( jobMeta.getDatabases(), new StringObjectId( id ) );
jobMeta.getJobLogTable().setConnectionName( conn.getName() );

jobMeta.getJobLogTable().setBatchIdUsed( rootNode.getProperty( PROP_USE_BATCH_ID ).getBoolean() );
jobMeta.setBatchIdPassed( rootNode.getProperty( PROP_PASS_BATCH_ID ).getBoolean() );
jobMeta.getJobLogTable().setLogFieldUsed( rootNode.getProperty( PROP_USE_LOGFIELD ).getBoolean() );
jobMeta.getJobLogTable().setLogSizeLimit( getString( rootNode, PROP_LOG_SIZE_LIMIT ) );
int intervalInSeconds = Const.toInt( environmentSubstitute( jobLogTable.getLogInterval() ), -1 );

if ( jobLogTable.isDefined() ) {
  DatabaseMeta logcon = jobMeta.getJobLogTable().getDatabaseMeta();
  String schemaName = environmentSubstitute( jobMeta.getJobLogTable().getActualSchemaName() );
  String tableName = environmentSubstitute( jobMeta.getJobLogTable().getActualTableName() );
  String schemaAndTable =
    jobMeta.getJobLogTable().getDatabaseMeta().getQuotedSchemaTableCombination( schemaName, tableName );

  Database ldb = new Database( this, logcon );
  ldb.shareVariablesWith( this );

  if ( jobMeta.getJobLogTable().isBatchIdUsed() ) {
    id_batch = logcon.getNextBatchId( ldb, schemaName, tableName, jobLogTable.getKeyField().getFieldName() );
    setBatchId( id_batch.longValue() );
    if ( getPassedBatchId() <= 0 ) {
      // ...
// Older jobs store the log table settings as flat tags on the job node...
jobLogTable.setConnectionName( XMLHandler.getTagValue( jobnode, "logconnection" ) );
jobLogTable.setTableName( XMLHandler.getTagValue( jobnode, "logtable" ) );
jobLogTable.setBatchIdUsed( "Y".equalsIgnoreCase( XMLHandler.getTagValue( jobnode, "use_batchid" ) ) );
jobLogTable.setLogFieldUsed( "Y".equalsIgnoreCase( XMLHandler.getTagValue( jobnode, "use_logfield" ) ) );
jobLogTable.findField( JobLogTable.ID.CHANNEL_ID ).setEnabled( false );
jobLogTable.findField( JobLogTable.ID.LINES_REJECTED ).setEnabled( false );
} else {
  // ...while newer jobs carry a dedicated log-table node.
  jobLogTable.loadXML( jobLogNode, databases, null );
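// Kettle serializes job-level booleans as "Y"/"N" tags; getTagValue returns
// null for a missing tag, which the equalsIgnoreCase comparison above reads
// as false. A minimal sketch of that convention (XmlFlags and readFlag are
// hypothetical names, not part of the Kettle API):
import org.pentaho.di.core.xml.XMLHandler;
import org.w3c.dom.Node;

final class XmlFlags {
  // Missing tag -> null -> false, matching the loadXML code above.
  static boolean readFlag( Node node, String tag ) {
    return "Y".equalsIgnoreCase( XMLHandler.getTagValue( node, tag ) );
  }
}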
public void tableFieldsChangedCorrectlyAfterNullingGlobalParams(
    String valueForAllFields, String expectedAfterNullingGlobalParams ) {
  PerformanceLogTable performanceLogTable = getPerformanceLogTableWithAllEqFields( valueForAllFields );
  performanceLogTable.setAllGlobalParametersToNull();
  commonTableFieldsValueChecker( performanceLogTable, expectedAfterNullingGlobalParams );
  assertEquals( performanceLogTable.getLogInterval(), expectedAfterNullingGlobalParams );

  JobLogTable jobLogTable = getJobLogTableWithAllEqFields( valueForAllFields );
  jobLogTable.setAllGlobalParametersToNull();
  commonTableFieldsValueChecker( jobLogTable, expectedAfterNullingGlobalParams );
  assertEquals( jobLogTable.getLogInterval(), expectedAfterNullingGlobalParams );
  assertEquals( jobLogTable.getLogSizeLimit(), expectedAfterNullingGlobalParams );

  TransLogTable transLogTable = getTransLogTableWithAllEqFields( valueForAllFields );
  transLogTable.setAllGlobalParametersToNull();
  commonTableFieldsValueChecker( transLogTable, expectedAfterNullingGlobalParams );
  assertEquals( transLogTable.getLogInterval(), expectedAfterNullingGlobalParams );
  assertEquals( transLogTable.getLogSizeLimit(), expectedAfterNullingGlobalParams );
}
fdLogInterval.right = new FormAttachment( 100, 0 );
wLogInterval.setLayoutData( fdLogInterval );
wLogInterval.setText( Const.NVL( jobLogTable.getLogInterval(), "" ) );

fdLogTimeout.right = new FormAttachment( 100, 0 );
wLogTimeout.setLayoutData( fdLogTimeout );
wLogTimeout.setText( Const.NVL( jobLogTable.getTimeoutInDays(), "" ) );

fdLogSizeLimit.right = new FormAttachment( 100, 0 );
wLogSizeLimit.setLayoutData( fdLogSizeLimit );
wLogSizeLimit.setText( Const.NVL( jobLogTable.getLogSizeLimit(), "" ) );

wlFields.setLayoutData( fdlFields );

final java.util.List<LogTableField> fields = jobLogTable.getFields();
final int nrRows = fields.size();
monitor.subTask( BaseMessages.getString( PKG, "JobMeta.Monitor.GettingSQLStatementsForJobLogTables" ) );
if ( jobLogTable.getDatabaseMeta() != null && !Utils.isEmpty( jobLogTable.getTableName() ) ) {
  Database db = new Database( this, jobLogTable.getDatabaseMeta() );
  try {
    db.connect();
    RowMetaInterface fields = jobLogTable.getLogRecord( LogStatus.START, null, null ).getRowMeta();
    // getDDL returns the statements needed to create or alter the table;
    // it yields nothing when the table already matches the log record layout.
    String sql = db.getDDL( jobLogTable.getTableName(), fields );
    if ( sql != null && sql.length() > 0 ) {
      SQLStatement stat = new SQLStatement(
        BaseMessages.getString( PKG, "JobMeta.SQLFeedback.ThisJob" ), jobLogTable.getDatabaseMeta(), sql );
      stats.add( stat );
    }
  } catch ( KettleDatabaseException dbe ) {
    SQLStatement stat = new SQLStatement(
      BaseMessages.getString( PKG, "JobMeta.SQLFeedback.ThisJob" ), jobLogTable.getDatabaseMeta(), null );
    stat.setError(
      BaseMessages.getString( PKG, "JobMeta.SQLFeedback.ErrorObtainingJobLogTableInfo" ) + dbe.getMessage() );
    stats.add( stat );
  } finally {
    db.disconnect();
  }
}
if ( jobMeta.getJobLogTable().getDatabaseMeta() != null
    && !Utils.isEmpty( jobMeta.getJobLogTable().getTableName() ) ) {
  jobGraph.addAllTabs();
  jobGraph.extraViewTabFolder.setSelection( jobGraph.jobHistoryDelegate.getJobHistoryTab() );
}
if ( isBatchIdUsed() ) {
  RowMetaInterface batchIndex = new RowMeta();
  LogTableField keyField = getKeyField();
  // ... (the excerpt elides adding the key field to batchIndex and
  //      registering that index)
}

// The lookup index covers the errors, status and job name fields.
// (This declaration is reconstructed; it was dropped from the excerpt.)
RowMetaInterface lookupIndex = new RowMeta();
LogTableField errorsField = findField( ID.ERRORS );
if ( errorsField != null ) {
  ValueMetaInterface valueMeta = new ValueMetaBase( errorsField.getFieldName(), errorsField.getDataType() );
  lookupIndex.addValueMeta( valueMeta );
}
LogTableField statusField = findField( ID.STATUS );
if ( statusField != null ) {
  ValueMetaInterface valueMeta = new ValueMetaBase( statusField.getFieldName(), statusField.getDataType() );
  lookupIndex.addValueMeta( valueMeta );
}
LogTableField transNameField = findField( ID.JOBNAME );
if ( transNameField != null ) {
  ValueMetaInterface valueMeta = new ValueMetaBase( transNameField.getFieldName(), transNameField.getDataType() );
  lookupIndex.addValueMeta( valueMeta );
}
/**
 * Writes information to the job log table and, when the job has finished, cleans out old records.
 */
protected void writeLogTableInformation( JobLogTable jobLogTable, LogStatus status )
  throws KettleJobException, KettleDatabaseException {
  boolean cleanLogRecords = status.equals( LogStatus.END );
  String tableName = jobLogTable.getActualTableName();
  DatabaseMeta logcon = jobLogTable.getDatabaseMeta();

  Database ldb = createDataBase( logcon );
  ldb.shareVariablesWith( this );
  try {
    ldb.connect();
    ldb.setCommit( logCommitSize );
    ldb.writeLogRecord( jobLogTable, status, this, null );

    if ( cleanLogRecords ) {
      ldb.cleanupLogRecords( jobLogTable );
    }
  } catch ( KettleDatabaseException dbe ) {
    addErrors( 1 );
    throw new KettleJobException( "Unable to end processing by writing log record to table " + tableName, dbe );
  } finally {
    if ( !ldb.isAutoCommit() ) {
      ldb.commitLog( true, jobLogTable );
    }
    ldb.disconnect();
  }
}
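// A sketch of how the method above is driven over a run. The helper below is
// hypothetical and assumed to live in a Job subclass, since
// writeLogTableInformation is protected. Only the END-status write triggers
// cleanupLogRecords, per the cleanLogRecords flag above.
void sketchRunLogging( JobLogTable jobLogTable ) throws KettleJobException, KettleDatabaseException {
  writeLogTableInformation( jobLogTable, LogStatus.START ); // record run start
  // ... job entries execute here ...
  writeLogTableInformation( jobLogTable, LogStatus.END );   // final record plus old-row cleanup
}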
private Set<DatabaseMeta> getUsedDatabaseMetas() {
  Set<DatabaseMeta> databaseMetas = new HashSet<DatabaseMeta>();
  for ( JobEntryCopy jobEntryCopy : getJobCopies() ) {
    DatabaseMeta[] dbs = jobEntryCopy.getEntry().getUsedDatabaseConnections();
    if ( dbs != null ) {
      for ( DatabaseMeta db : dbs ) {
        databaseMetas.add( db );
      }
    }
  }

  databaseMetas.add( jobLogTable.getDatabaseMeta() );
  for ( LogTableInterface logTable : getExtraLogTables() ) {
    databaseMetas.add( logTable.getDatabaseMeta() );
  }
  return databaseMetas;
}
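// Note that jobLogTable.getDatabaseMeta() is null when no log connection is
// configured, so the set built above can contain a null entry. A hypothetical
// guard for callers that only want real connections:
Set<DatabaseMeta> usedDatabaseMetas = getUsedDatabaseMetas();
usedDatabaseMetas.remove( null ); // HashSet permits null, so this is safe either way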
jobhops = new ArrayList<JobHopMeta>();

jobLogTable = JobLogTable.getDefault( this, this );
jobEntryLogTable = JobEntryLogTable.getDefault( this, this );
extraLogTables = new ArrayList<LogTableInterface>();
// ... (excerpt begins mid-statement: the stored value is the log connection's object id, or -1 when unset)
    .getJobLogTable().getDatabaseMeta() != null ? jobMeta.getJobLogTable().getDatabaseMeta().getObjectId() : -1L );
table.addValue( new ValueMetaString( KettleDatabaseRepository.FIELD_JOB_TABLE_NAME_LOG ),
  jobMeta.getJobLogTable().getTableName() );
table.addValue( new ValueMetaBoolean( KettleDatabaseRepository.FIELD_JOB_USE_BATCH_ID ),
  jobMeta.getJobLogTable().isBatchIdUsed() );
table.addValue( new ValueMetaBoolean( KettleDatabaseRepository.FIELD_JOB_USE_LOGFIELD ),
  jobMeta.getJobLogTable().isLogFieldUsed() );

repository.connectionDelegate.insertJobAttribute( jobMeta.getObjectId(), 0,
  KettleDatabaseRepository.JOB_ATTRIBUTE_LOG_SIZE_LIMIT, 0, jobMeta.getJobLogTable().getLogSizeLimit() );

if ( jobMeta.getJobLogTable().getDatabaseMeta() != null ) {
  repository.insertJobEntryDatabase( jobMeta.getObjectId(), null,
    jobMeta.getJobLogTable().getDatabaseMeta().getObjectId() );
}
jobMeta.getJobLogTable().setConnectionName( logDb.getName() );
jobMeta.getJobLogTable().setTableName( jobRow.getString( KettleDatabaseRepository.FIELD_JOB_TABLE_NAME_LOG, null ) );
jobMeta.getJobLogTable().setBatchIdUsed( jobRow.getBoolean( KettleDatabaseRepository.FIELD_JOB_USE_BATCH_ID, false ) );
jobMeta.getJobLogTable().setLogFieldUsed( jobRow.getBoolean( KettleDatabaseRepository.FIELD_JOB_USE_LOGFIELD, false ) );
jobMeta.getJobLogTable().setLogSizeLimit( getJobAttributeString( jobMeta.getObjectId(), 0,
  KettleDatabaseRepository.JOB_ATTRIBUTE_LOG_SIZE_LIMIT ) );
@Test
public void recordsCleanUpMethodIsCalled_JobLogTable() throws Exception {
  JobLogTable jobLogTable = JobLogTable.getDefault( mockedVariableSpace, hasDatabasesInterface );
  setAllTableParamsDefault( jobLogTable );

  doCallRealMethod().when( mockedJob ).writeLogTableInformation( jobLogTable, LogStatus.END );
  mockedJob.writeLogTableInformation( jobLogTable, LogStatus.END );

  verify( mockedDataBase ).cleanupLogRecords( jobLogTable );
}
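// A complementary sketch (not from the source) reusing the same fixture: with
// LogStatus.START the cleanLogRecords flag in writeLogTableInformation stays
// false, so the mocked database should never see a cleanupLogRecords call.
// Mockito's never() verification mode expresses that.
@Test
public void recordsCleanUpMethodIsNotCalledOnStart_JobLogTable() throws Exception {
  JobLogTable jobLogTable = JobLogTable.getDefault( mockedVariableSpace, hasDatabasesInterface );
  setAllTableParamsDefault( jobLogTable );

  doCallRealMethod().when( mockedJob ).writeLogTableInformation( jobLogTable, LogStatus.START );
  mockedJob.writeLogTableInformation( jobLogTable, LogStatus.START );

  verify( mockedDataBase, never() ).cleanupLogRecords( jobLogTable );
}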