/**
 * Restores this entry's settings (log message, level code and subject) from the repository.
 *
 * @param rep          repository to read the job entry attributes from
 * @param id_jobentry  id of the job entry whose attributes are loaded
 * @throws KettleException if the repository read fails
 */
@Override
public void loadRep( Repository rep, IMetaStore metaStore, ObjectId id_jobentry, List<DatabaseMeta> databases,
  List<SlaveServer> slaveServers ) throws KettleException {
  try {
    logmessage = rep.getJobEntryAttributeString( id_jobentry, "logmessage" );
    // The level is persisted as a textual code and translated back into a LogLevel constant.
    String levelCode = rep.getJobEntryAttributeString( id_jobentry, "loglevel" );
    entryLogLevel = LogLevel.getLogLevelForCode( levelCode );
    logsubject = rep.getJobEntryAttributeString( id_jobentry, "logsubject" );
  } catch ( KettleDatabaseException dbe ) {
    throw new KettleException(
      BaseMessages.getString( PKG, "WriteToLog.Error.UnableToLoadFromRepository.Label" ) + id_jobentry, dbe );
  }
}
/**
 * Sets this entry's log level from its textual code by delegating to the superclass setter.
 *
 * @param value textual log level code, translated via {@link LogLevel#getLogLevelForCode(String)}
 */
public void setLogLevelString( String value ) {
  super.setLogLevel( LogLevel.getLogLevelForCode( value ).getLevel() );
}
/**
 * Restores this entry's settings (log message, level code and subject) from its XML node.
 *
 * @param entrynode the XML node holding this job entry's definition
 * @throws KettleXMLException if any tag cannot be read
 */
@Override
public void loadXML( Node entrynode, List<DatabaseMeta> databases, List<SlaveServer> slaveServers, Repository rep,
  IMetaStore metaStore ) throws KettleXMLException {
  try {
    // Let the base class pick up the common job entry attributes first.
    super.loadXML( entrynode, databases, slaveServers );
    logmessage = XMLHandler.getTagValue( entrynode, "logmessage" );
    // The level is persisted as a textual code and translated back into a LogLevel constant.
    String levelCode = XMLHandler.getTagValue( entrynode, "loglevel" );
    entryLogLevel = LogLevel.getLogLevelForCode( levelCode );
    logsubject = XMLHandler.getTagValue( entrynode, "logsubject" );
  } catch ( Exception e ) {
    throw new KettleXMLException(
      BaseMessages.getString( PKG, "WriteToLog.Error.UnableToLoadFromXML.Label" ), e );
  }
}
// Log-file configuration read from the job entry's XML node.
logfile = XMLHandler.getTagValue( entrynode, "logfile" );
logext = XMLHandler.getTagValue( entrynode, "logext" );
// The level is persisted as a textual code and translated back into a LogLevel constant.
logFileLevel = LogLevel.getLogLevelForCode( XMLHandler.getTagValue( entrynode, "loglevel" ) );
// Flag persisted as "Y"/"N": anything other than "Y" (case-insensitive, including null) is false.
insertScript = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "insertScript" ) );
// Apply the log level requested on the command line, then report the effective
// level back to the user at minimal verbosity (always visible).
log.setLogLevel( LogLevel.getLogLevelForCode( optionLoglevel.toString() ) );
log.logMinimal( BaseMessages.getString( PKG, "Pan.Log.Loglevel", log.getLogLevel().getDescription() ) );
public void loadSettings() { LogLevel logLevel = LogLevel.getLogLevelForCode( props.getLogLevel() ); DefaultLogLevel.setLogLevel( logLevel ); log.setLogLevel( logLevel ); KettleLogStore.getAppender().setMaxNrLines( props.getMaxNrLinesInLog() ); // transMeta.setMaxUndo(props.getMaxUndo()); DBCache.getInstance().setActive( props.useDBCache() ); }
// Apply the log level requested on the command line, then report the effective
// level back to the user at minimal verbosity (always visible).
log.setLogLevel( LogLevel.getLogLevelForCode( optionLoglevel.toString() ) );
log.logMinimal( BaseMessages.getString( PKG, "Kitchen.Log.LogLevel", log.getLogLevel().getDescription() ) );
// Translate the textual level option and record it on the job's execution configuration.
LogLevel logLevel = LogLevel.getLogLevelForCode( levelOption );
jobExecutionConfiguration.setLogLevel( logLevel );
// Bundle the job metadata together with the configured execution settings.
JobConfiguration jobConfiguration = new JobConfiguration( jobMeta, jobExecutionConfiguration );
// Translate the textual level option and record it on the transformation's execution configuration.
LogLevel logLevel = LogLevel.getLogLevelForCode( levelOption );
transExecutionConfiguration.setLogLevel( logLevel );
// Bundle the transformation metadata together with the configured execution settings.
TransConfiguration transConfiguration = new TransConfiguration( transMeta, transExecutionConfiguration );
private static void initLogging( CommandLineOption[] options ) throws KettleException { StringBuilder optionLogFile = getCommandLineOption( options, "logfile" ).getArgument(); StringBuilder optionLogLevel = getCommandLineOption( options, "level" ).getArgument(); // Set default Locale: Locale.setDefault( Const.DEFAULT_LOCALE ); if ( !Utils.isEmpty( optionLogFile ) ) { fileLoggingEventListener = new FileLoggingEventListener( optionLogFile.toString(), true ); if ( log.isBasic() ) { String filename = fileLoggingEventListener.getFilename(); log.logBasic( BaseMessages.getString( PKG, "Spoon.Log.LoggingToFile" ) + filename ); } KettleLogStore.getAppender().addLoggingEventListener( fileLoggingEventListener ); } else { fileLoggingEventListener = null; } if ( !Utils.isEmpty( optionLogLevel ) ) { log.setLogLevel( LogLevel.getLogLevelForCode( optionLogLevel.toString() ) ); if ( log.isBasic() ) { // "Logging is at level : " log.logBasic( BaseMessages.getString( PKG, "Spoon.Log.LoggingAtLevel" ) + log.getLogLevel().getDescription() ); } } }
// Log-file configuration read from the job entry's XML node.
logfile = XMLHandler.getTagValue( entrynode, "logfile" );
logext = XMLHandler.getTagValue( entrynode, "logext" );
// The level is persisted as a textual code and translated back into a LogLevel constant.
logFileLevel = LogLevel.getLogLevelForCode( XMLHandler.getTagValue( entrynode, "loglevel" ) );
// Flags persisted as "Y"/"N": anything other than "Y" (case-insensitive, including null) is false.
clustering = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "cluster" ) );
createParentFolder = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "create_parent_folder" ) );
// Logging options read from the XML node (trecNode).
// The level is persisted as a textual code and translated back into a LogLevel constant.
logLevel = LogLevel.getLogLevelForCode( XMLHandler.getTagValue( trecNode, "log_level" ) );
// Flag persisted as "Y"/"N": anything other than "Y" (case-insensitive, including null) is false.
setLogfile = "Y".equalsIgnoreCase( XMLHandler.getTagValue( trecNode, "log_file" ) );
logFileName = XMLHandler.getTagValue( trecNode, "log_filename" );
// Log-file configuration read from the job entry's XML node.
logfile = XMLHandler.getTagValue( entrynode, "logfile" );
logext = XMLHandler.getTagValue( entrynode, "logext" );
// The level is persisted as a textual code and translated back into a LogLevel constant.
logFileLevel = LogLevel.getLogLevelForCode( XMLHandler.getTagValue( entrynode, "loglevel" ) );
// Flag persisted as "Y"/"N": anything other than "Y" (case-insensitive, including null) is false.
setAppendLogfile = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "set_append_logfile" ) );
// Name of the remote slave server to run on, if any.
remoteSlaveServerName = XMLHandler.getTagValue( entrynode, "slave_server_name" );
// Translate the textual level option and record it on the transformation's execution configuration.
LogLevel logLevel = LogLevel.getLogLevelForCode( levelOption );
transExecutionConfiguration.setLogLevel( logLevel );
// Bundle the transformation metadata together with the configured execution settings.
TransConfiguration transConfiguration = new TransConfiguration( transMeta, transExecutionConfiguration );
// The level is persisted as a textual code and translated back into a LogLevel constant.
logLevel = LogLevel.getLogLevelForCode( XMLHandler.getTagValue( trecNode, "log_level" ) );
// Flag persisted as "Y"/"N": anything other than "Y" (case-insensitive, including null) is false.
clearingLog = "Y".equalsIgnoreCase( XMLHandler.getTagValue( trecNode, "clear_log" ) );
// Translate the textual level option and record it on the job's execution configuration.
LogLevel logLevel = LogLevel.getLogLevelForCode( levelOption );
jobExecutionConfiguration.setLogLevel( logLevel );
/**
 * Loads this 'shell' job entry's settings from the repository: script/file location,
 * execution flags, log-file configuration and the variable-length argument list.
 *
 * @param rep          repository to read the job entry attributes from
 * @param id_jobentry  id of the job entry whose attributes are loaded
 * @throws KettleException if the repository read fails
 */
public void loadRep( Repository rep, IMetaStore metaStore, ObjectId id_jobentry, List<DatabaseMeta> databases, List<SlaveServer> slaveServers ) throws KettleException {
  try {
    // Script location and working directory.
    setFileName( rep.getJobEntryAttributeString( id_jobentry, "file_name" ) );
    setWorkDirectory( rep.getJobEntryAttributeString( id_jobentry, "work_directory" ) );
    // Execution flags.
    argFromPrevious = rep.getJobEntryAttributeBoolean( id_jobentry, "arg_from_previous" );
    execPerRow = rep.getJobEntryAttributeBoolean( id_jobentry, "exec_per_row" );
    // Log-file configuration.
    setLogfile = rep.getJobEntryAttributeBoolean( id_jobentry, "set_logfile" );
    setAppendLogfile = rep.getJobEntryAttributeBoolean( id_jobentry, "set_append_logfile" );
    addDate = rep.getJobEntryAttributeBoolean( id_jobentry, "add_date" );
    addTime = rep.getJobEntryAttributeBoolean( id_jobentry, "add_time" );
    logfile = rep.getJobEntryAttributeString( id_jobentry, "logfile" );
    logext = rep.getJobEntryAttributeString( id_jobentry, "logext" );
    // The level is persisted as a textual code and translated back into a LogLevel constant.
    logFileLevel = LogLevel.getLogLevelForCode( rep.getJobEntryAttributeString( id_jobentry, "loglevel" ) );
    // Inline-script mode: the script text itself instead of an external file.
    insertScript = rep.getJobEntryAttributeBoolean( id_jobentry, "insertScript" );
    script = rep.getJobEntryAttributeString( id_jobentry, "script" );

    // How many arguments?
    int argnr = rep.countNrJobEntryAttributes( id_jobentry, "argument" );
    allocate( argnr );

    // Read them all...
    for ( int a = 0; a < argnr; a++ ) {
      arguments[a] = rep.getJobEntryAttributeString( id_jobentry, a, "argument" );
    }
  } catch ( KettleDatabaseException dbe ) {
    throw new KettleException( "Unable to load job entry of type 'shell' from the repository with id_jobentry=" + id_jobentry, dbe );
  }
}
// Log-file configuration read from the repository.
logfile = rep.getJobEntryAttributeString( id_jobentry, "logfile" );
logext = rep.getJobEntryAttributeString( id_jobentry, "logext" );
// The level is persisted as a textual code and translated back into a LogLevel constant.
logFileLevel = LogLevel.getLogLevelForCode( rep.getJobEntryAttributeString( id_jobentry, "loglevel" ) );
clustering = rep.getJobEntryAttributeBoolean( id_jobentry, "cluster" );
createParentFolder = rep.getJobEntryAttributeBoolean( id_jobentry, "create_parent_folder" );
// Log-file configuration read from the repository.
logfile = rep.getJobEntryAttributeString( id_jobentry, "logfile" );
logext = rep.getJobEntryAttributeString( id_jobentry, "logext" );
// The level is persisted as a textual code and translated back into a LogLevel constant.
logFileLevel = LogLevel.getLogLevelForCode( rep.getJobEntryAttributeString( id_jobentry, "loglevel" ) );
setAppendLogfile = rep.getJobEntryAttributeBoolean( id_jobentry, "set_append_logfile" );
// Name of the remote slave server to run on, if any.
remoteSlaveServerName = rep.getJobEntryAttributeString( id_jobentry, "slave_server_name" );