/**
 * Checks if the logging level is detailed.
 *
 * @return true if the logging level is detailed, false otherwise
 */
public boolean isDetailed() {
  return log.isDetailed();
}
/**
 * Checks whether detailed logging is enabled.
 *
 * @return true if detailed logging is enabled
 */
public boolean isDetailed() {
  return log.isDetailed();
}
/**
 * Checks whether detailed logging is enabled.
 *
 * @return true if detailed logging is enabled
 */
public boolean isDetailed() {
  return getLog().isDetailed();
}
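/*
 * Illustrative usage sketch (not from the original source): the accessors above are
 * typically used to guard construction of detailed log messages, so the localized
 * string is only built when detailed logging is actually enabled. The message key
 * "SomeStep.Log.RowsProcessed" and the rowsProcessed counter are assumed names.
 */
if ( isDetailed() ) {
  logDetailed( BaseMessages.getString( PKG, "SomeStep.Log.RowsProcessed", String.valueOf( rowsProcessed ) ) );
}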
@Override
public RowSet parse( InputStream in ) throws KettleException {
  readInput( in );
  List<List<?>> results = evalCombinedResult();
  int len = results.isEmpty() ? 0 : results.get( 0 ).size();
  if ( log.isDetailed() ) {
    log.logDetailed( BaseMessages.getString( PKG, "JsonInput.Log.NrRecords", len ) );
  }
  if ( len == 0 ) {
    return getEmptyResponse();
  }
  return new TransposedRowSet( results );
}
/**
 * Sort the entire vector, if it is not empty.
 */
void quickSort( List<Object[]> elements ) throws KettleException {
  if ( elements.size() > 0 ) {
    Collections.sort( elements, data.rowComparator );
    long nrConversions = 0L;
    for ( ValueMetaInterface valueMeta : data.outputRowMeta.getValueMetaList() ) {
      nrConversions += valueMeta.getNumberOfBinaryStringConversions();
      valueMeta.setNumberOfBinaryStringConversions( 0L );
    }
    if ( log.isDetailed() ) {
      logDetailed( BaseMessages.getString( PKG, "SortRows.Detailed.ReportNumberOfBinaryStringConv", nrConversions ) );
    }
  }
}
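/*
 * Stand-in sketch (not the original data.rowComparator): a comparator over Object[]
 * rows that orders on the first column is all Collections.sort needs above. The
 * column index and the string conversion are illustrative assumptions.
 * Requires java.util.Comparator and java.util.Collections.
 */
Comparator<Object[]> byFirstColumn = ( a, b ) -> String.valueOf( a[0] ).compareTo( String.valueOf( b[0] ) );
Collections.sort( elements, byFirstColumn );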
@Override
protected void fillFileAdditionalFields( JsonInputData data, FileObject file ) throws FileSystemException {
  super.fillFileAdditionalFields( data, file );
  data.filename = KettleVFS.getFilename( file );
  data.filenr++;
  if ( log.isDetailed() ) {
    logDetailed( BaseMessages.getString( PKG, "JsonInput.Log.OpeningFile", file.toString() ) );
  }
  addFileToResultFilesname( file );
}
/**
 * Specify after how many rows a commit needs to occur when inserting or updating values.
 *
 * @param commsize The number of rows to wait before doing a commit on the connection.
 */
public void setCommit( int commsize ) {
  commitsize = commsize;
  String onOff = ( commitsize <= 0 ? "on" : "off" );
  try {
    connection.setAutoCommit( commitsize <= 0 );
    if ( log.isDetailed() ) {
      log.logDetailed( "Auto commit " + onOff );
    }
  } catch ( Exception e ) {
    if ( log.isDebug() ) {
      log.logDebug( "Can't turn auto commit " + onOff + Const.CR + Const.getStackTracker( e ) );
    }
  }
}
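/*
 * Caller sketch, assuming a connected Database instance named db (hypothetical):
 * a positive commit size turns auto-commit off so the caller can commit in batches,
 * while 0 or a negative value re-enables auto-commit, as implemented above.
 */
db.setCommit( 1000 ); // commit in batches of roughly 1000 rows
db.setCommit( 0 );    // turn auto-commit back on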
public String sendXML( String xml, String service ) throws Exception {
  HttpPost method = buildSendXMLMethod( xml.getBytes( Const.XML_ENCODING ), service );
  try {
    return executeAuth( method );
  } finally {
    // Release current connection to the connection pool once you are done
    method.releaseConnection();
    if ( log.isDetailed() ) {
      log.logDetailed( BaseMessages.getString( PKG, "SlaveServer.DETAILED_SentXmlToService", service,
        environmentSubstitute( hostname ) ) );
    }
  }
}
private boolean systemPing( String hostname, int timeout ) {
  boolean retval = false;
  InetAddress address = null;
  try {
    address = InetAddress.getByName( hostname );
    if ( address == null ) {
      logError( BaseMessages.getString( PKG, "JobPing.CanNotGetAddress", hostname ) );
      return retval;
    }
    if ( log.isDetailed() ) {
      logDetailed( BaseMessages.getString( PKG, "JobPing.HostName", address.getHostName() ) );
      logDetailed( BaseMessages.getString( PKG, "JobPing.HostAddress", address.getHostAddress() ) );
    }
    retval = address.isReachable( timeout );
  } catch ( Exception e ) {
    logError( BaseMessages.getString( PKG, "JobPing.ErrorSystemPing", hostname, e.getMessage() ) );
  }
  return retval;
}
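/*
 * Self-contained JDK sketch of the same reachability check systemPing uses above;
 * the host name and the 3000 ms timeout are illustrative values only.
 */
import java.net.InetAddress;

public class PingSketch {
  public static void main( String[] args ) throws Exception {
    InetAddress address = InetAddress.getByName( "localhost" );
    boolean reachable = address.isReachable( 3000 ); // ICMP echo or TCP echo, depending on privileges
    System.out.println( "Reachable: " + reachable );
  }
}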
@Override
public void dispose( StepMetaInterface smi, StepDataInterface sdi ) {
  if ( data.tempFile != null ) {
    try {
      closeInput();
      closeOutput();
    } catch ( KettleFileException e ) {
      log.logError( e.getLocalizedMessage() );
    }
    boolean tempFileDeleted = data.tempFile.delete();
    if ( !tempFileDeleted && log.isDetailed() ) {
      log.logDetailed( BaseMessages.getString( PKG, "GroupBy.Exception.UnableToDeleteTemporaryFile",
        data.tempFile.getPath() ) );
    }
  }
  super.dispose( smi, sdi );
}
private void addFileToResultFilenames( String fileaddentry, Result result, Job parentJob ) {
  try {
    ResultFile resultFile =
      new ResultFile( ResultFile.FILE_TYPE_GENERAL, KettleVFS.getFileObject( fileaddentry, this ),
        parentJob.getJobname(), toString() );
    result.getResultFiles().put( resultFile.getFile().toString(), resultFile );
    if ( log.isDetailed() ) {
      logDetailed( BaseMessages.getString( PKG, "JobXMLWellFormed.Log.FileAddedToResultFilesName", fileaddentry ) );
    }
  } catch ( Exception e ) {
    logError( BaseMessages.getString( PKG, "JobXMLWellFormed.Error.AddingToFilenameResult", fileaddentry,
      e.getMessage() ) );
  }
}
public final void connect( String username, String password ) throws KettleException {
  Hashtable<String, String> env = new Hashtable<String, String>();
  setupEnvironment( env, username, password );
  try {
    /* Establish LDAP association */
    doConnect( username, password );
    if ( log.isBasic() ) {
      log.logBasic( BaseMessages.getString( PKG, "LDAPInput.Log.ConnectedToServer", hostname,
        Const.NVL( username, "" ) ) );
    }
    if ( log.isDetailed() ) {
      log.logDetailed( BaseMessages.getString( PKG, "LDAPInput.ClassUsed.Message", ctx.getClass().getName() ) );
    }
  } catch ( Exception e ) {
    throw new KettleException( BaseMessages.getString( PKG, "LDAPinput.Exception.ErrorConnecting",
      e.getMessage() ), e );
  }
}
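/*
 * Sketch of the kind of JNDI environment setupEnvironment( env, username, password )
 * is expected to populate before doConnect(). The provider URL, port 389 and "simple"
 * authentication are assumptions, not the actual LDAPInput implementation; the
 * constants come from javax.naming.Context.
 */
env.put( Context.INITIAL_CONTEXT_FACTORY, "com.sun.jndi.ldap.LdapCtxFactory" );
env.put( Context.PROVIDER_URL, "ldap://" + hostname + ":389" );
env.put( Context.SECURITY_AUTHENTICATION, "simple" );
env.put( Context.SECURITY_PRINCIPAL, username );
env.put( Context.SECURITY_CREDENTIALS, password );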
private void displayResults() {
  if ( log.isDetailed() ) {
    logDetailed( "=======================================" );
    logDetailed( BaseMessages.getString( PKG, "JobUnZip.Log.Info.FilesInError", "" + NrErrors ) );
    logDetailed( BaseMessages.getString( PKG, "JobUnZip.Log.Info.FilesInSuccess", "" + NrSuccess ) );
    logDetailed( "=======================================" );
  }
}
private void displayResults() {
  if ( log.isDetailed() ) {
    logDetailed( "=======================================" );
    logDetailed( BaseMessages.getString( PKG, "JobMoveFiles.Log.Info.FilesInError", "" + NrErrors ) );
    logDetailed( BaseMessages.getString( PKG, "JobMoveFiles.Log.Info.FilesInSuccess", "" + NrSuccess ) );
    logDetailed( "=======================================" );
  }
}
/**
 * Prepare inserting values into a table, using the fields & values in a Row.
 *
 * @param rowMeta The metadata row to determine which values need to be inserted
 * @param schemaName The name of the schema in which we want to insert rows
 * @param tableName The name of the table in which we want to insert rows
 * @throws KettleDatabaseException if something went wrong.
 */
public void prepareInsert( RowMetaInterface rowMeta, String schemaName, String tableName ) throws KettleDatabaseException {
  if ( rowMeta.size() == 0 ) {
    throw new KettleDatabaseException( "No fields in row, can't insert!" );
  }
  String ins = getInsertStatement( schemaName, tableName, rowMeta );
  if ( log.isDetailed() ) {
    log.logDetailed( "Preparing statement: " + Const.CR + ins );
  }
  prepStatementInsert = prepareSQL( ins );
}
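/*
 * Hypothetical caller sketch: prepare the INSERT once for a known row layout so the
 * prepared statement can be reused for every incoming row. The db instance, rowMeta,
 * schema and table names are assumptions, not part of the original snippet; the step
 * would then typically bind values and execute the prepared statement per row.
 */
db.prepareInsert( rowMeta, "public", "my_table" );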
private void addFilenameToResult() throws FileSystemException {
  if ( meta.isaddTargetFileNametoResult() ) {
    // Add this to the result file names...
    ResultFile resultFile =
      new ResultFile( ResultFile.FILE_TYPE_GENERAL, data.zipFile, getTransMeta().getName(), getStepname() );
    resultFile.setComment( BaseMessages.getString( PKG, "ZipFile.Log.FileAddedResult" ) );
    addResultFile( resultFile );
    if ( log.isDetailed() ) {
      log.logDetailed( toString(), BaseMessages.getString( PKG, "ZipFile.Log.FilenameAddResult",
        data.sourceFile.toString() ) );
    }
  }
}
private void displayResults() {
  if ( log.isDetailed() ) {
    logDetailed( "=======================================" );
    logDetailed( BaseMessages.getString( PKG, "JobXMLWellFormed.Log.Info.FilesInError", "" + NrErrors ) );
    logDetailed( BaseMessages.getString( PKG, "JobXMLWellFormed.Log.Info.FilesInBadFormed", "" + NrBadFormed ) );
    logDetailed( BaseMessages.getString( PKG, "JobXMLWellFormed.Log.Info.FilesInWellFormed", "" + NrWellFormed ) );
    logDetailed( "=======================================" );
  }
}
private void displayResults() {
  if ( log.isDetailed() ) {
    logDetailed( "=======================================" );
    logDetailed( BaseMessages.getString( PKG, "JobEntryMSAccessBulkLoad.Log.Info.FilesToLoad", "" + NrFilesToProcess ) );
    logDetailed( BaseMessages.getString( PKG, "JobEntryMSAccessBulkLoad.Log.Info.FilesLoaded", "" + NrSuccess ) );
    logDetailed( BaseMessages.getString( PKG, "JobEntryMSAccessBulkLoad.Log.Info.NrErrors", "" + NrErrors ) );
    logDetailed( "=======================================" );
  }
}
public void truncate() throws KettleException {
  String cmd;
  String table = data.schemaTable;
  String truncateStatement = meta.getDatabaseMeta().getTruncateTableStatement( null, table );
  if ( truncateStatement == null ) {
    throw new KettleException( "Truncate table is not supported!" );
  }
  cmd = truncateStatement + ";";
  try {
    executeSql( cmd );
  } catch ( Exception e ) {
    throw new KettleException( "Error while truncating table " + table, e );
  }
  // try to update the metadata registry
  util.updateMetadata( meta, -1 );
  if ( log.isDetailed() ) {
    logDetailed( "Successful: " + cmd );
  }
}
private void connectDatabase( Database database ) throws KettleDatabaseException {
  database.shareVariablesWith( this );
  if ( getTransMeta().isUsingUniqueConnections() ) {
    synchronized ( getTrans() ) {
      database.connect( getTrans().getTransactionId(), getPartitionID() );
    }
  } else {
    database.connect( getPartitionID() );
  }
  database.setCommit( 100 ); // we never get a commit, but it just turns off auto-commit.
  if ( log.isDetailed() ) {
    logDetailed( BaseMessages.getString( PKG, "DatabaseLookup.Log.ConnectedToDatabase" ) );
  }
}