// NOTE(review): fragment — the method is truncated here; the try block (and the use of
// lineIdx / sb) continues beyond this snippet, so only the visible setup is documented.
// Opens a CPD (copy-paste detection) token stream on the input file; lineIdx starts at 1,
// presumably tracking the current source line during tokenization — TODO confirm.
private void tokenize(InputFile inputFile, SensorContext context) { int lineIdx = 1; NewCpdTokens newCpdTokens = context.newCpdTokens().onFile(inputFile); try { StringBuilder sb = new StringBuilder();
/**
 * Creates a callback that records copy-paste-detection (CPD) tokens for {@code file}.
 *
 * @param context  the sensor context used to open the CPD token stream
 * @param file     the file being analyzed
 * @param settings plugin settings for this analysis
 * @param unit     the parse unit the tokens originate from
 */
public CPDCallback(SensorContext context, InputFile file, OpenEdgeSettings settings, ParseUnit unit) {
  this.file = file;
  this.settings = settings;
  this.unit = unit;
  // Open the CPD token stream on the analyzed file up front; tokens are added later.
  this.cpdTokens = context.newCpdTokens().onFile(file);
}
/**
 * Creates a visitor that emits CPD tokens for {@code inputFile}.
 *
 * @param sensorContext context used to open the CPD token stream
 * @param inputFile     the file whose tokens will be collected
 */
CpdVisitor(SensorContext sensorContext, InputFile inputFile) {
  // Bind the token stream to the file before storing the field reference.
  this.cpdTokens = sensorContext.newCpdTokens().onFile(inputFile);
  this.inputFile = inputFile;
}
/**
 * Creates a visitor that collects CPD tokens for {@code inputFile}.
 *
 * @param context   context used to open the CPD token stream
 * @param inputFile the file whose tokens will be collected
 */
public CpdVisitor(SensorContext context, InputFile inputFile) {
  NewCpdTokens tokens = context.newCpdTokens();
  newCpdTokens = tokens.onFile(inputFile);
}
/**
 * Pushes the pre-computed CPD tokens for {@code file} into the sensor context and saves them.
 *
 * @param sensorContext context used to open the CPD token stream
 * @param cpdTokens     tokens (position + image) previously extracted from the file
 * @param file          the file the tokens belong to
 */
private static void saveCpd(SensorContext sensorContext, SensorContextUtils.CpdToken[] cpdTokens, InputFile file) {
  NewCpdTokens newCpdTokens = sensorContext.newCpdTokens().onFile(file);
  for (int i = 0; i < cpdTokens.length; i++) {
    SensorContextUtils.CpdToken token = cpdTokens[i];
    newCpdTokens.addToken(token.startLine, token.startCol, token.endLine, token.endCol, token.image);
  }
  // save() must be called exactly once after all tokens have been added.
  newCpdTokens.save();
}
// NOTE(review): fragment — the method is truncated here; the try block (and the use of
// lineIdx / sb) continues beyond this snippet, so only the visible setup is documented.
// Opens a CPD (copy-paste detection) token stream on the input file; lineIdx starts at 1,
// presumably tracking the current source line during tokenization — TODO confirm.
private void tokenize(InputFile inputFile, SensorContext context) { int lineIdx = 1; NewCpdTokens newCpdTokens = context.newCpdTokens().onFile(inputFile); try { StringBuilder sb = new StringBuilder();
/**
 * Resolves the {@link InputFile} for the script being visited and opens the CPD token
 * stream on it. Called once per file before the tree is traversed.
 */
@Override
public void visitFile(Tree scriptTree) {
  File sourceFile = getContext().getFile();
  // Look the file up through the filesystem predicate API to get its InputFile handle.
  inputFile = fileSystem.inputFile(fileSystem.predicates().is(sourceFile));
  cpdTokens = sensorContext.newCpdTokens().onFile(inputFile);
}
/**
 * Locates the {@link InputFile} corresponding to the current script and attaches a fresh
 * CPD token stream to it. Invoked once at the start of each file's traversal.
 */
@Override
public void visitFile(Tree scriptTree) {
  File currentFile = getContext().getFile();
  inputFile = fileSystem.inputFile(fileSystem.predicates().is(currentFile));
  // Tokens will be appended to this stream as the tree is visited.
  cpdTokens = sensorContext.newCpdTokens().onFile(inputFile);
}
public void pushCpdTokens(InputFile inputFile, PythonVisitorContext visitorContext) { AstNode root = visitorContext.rootTree(); if (root != null) { NewCpdTokens cpdTokens = context.newCpdTokens().onFile(inputFile); List<Token> tokens = root.getTokens(); for (int i = 0; i < tokens.size(); i++) { Token token = tokens.get(i); TokenType currentTokenType = token.getType(); TokenType nextTokenType = i + 1 < tokens.size() ? tokens.get(i + 1).getType() : GenericTokenType.EOF; // INDENT/DEDENT could not be completely ignored during CPD see https://docs.python.org/3/reference/lexical_analysis.html#indentation // Just taking into account DEDENT is enough, but because the DEDENT token has an empty value, it's the // preceding new line which is added in its place to create a difference if (isNewLineWithIndentationChange(currentTokenType, nextTokenType) || !isIgnoredType(currentTokenType)) { TokenLocation location = new TokenLocation(token); cpdTokens.addToken(location.startLine(), location.startLineOffset(), location.endLine(), location.endLineOffset(), token.getValue()); } } cpdTokens.save(); } }
public void pushCpdTokens(InputFile inputFile, PythonVisitorContext visitorContext) { AstNode root = visitorContext.rootTree(); if (root != null) { NewCpdTokens cpdTokens = context.newCpdTokens().onFile(inputFile); List<Token> tokens = root.getTokens(); for (int i = 0; i < tokens.size(); i++) { Token token = tokens.get(i); TokenType currentTokenType = token.getType(); TokenType nextTokenType = i + 1 < tokens.size() ? tokens.get(i + 1).getType() : GenericTokenType.EOF; // INDENT/DEDENT could not be completely ignored during CPD see https://docs.python.org/3/reference/lexical_analysis.html#indentation // Just taking into account DEDENT is enough, but because the DEDENT token has an empty value, it's the // preceding new line which is added in its place to create a difference if (isNewLineWithIndentationChange(currentTokenType, nextTokenType) || !isIgnoredType(currentTokenType)) { TokenLocation location = new TokenLocation(token); cpdTokens.addToken(location.startLine(), location.startLineOffset(), location.endLine(), location.endLineOffset(), token.getValue()); } } cpdTokens.save(); } }
// NOTE(review): fragment — statements from inside a larger method (enclosing definition
// not visible here). Binds the highlighting builder and a new CPD token stream to the
// same input file; tokens are presumably added and saved further on — TODO confirm.
highlighting.onFile(inputFile); NewCpdTokens cpdTokens = context.newCpdTokens(); cpdTokens.onFile(inputFile);
/**
 * Converts the collected {@code CpdToken}s into sensor-API CPD tokens for
 * {@code inputFile} and saves them.
 *
 * @param cpdTokens tokens collected during analysis (syntax token position + image)
 * @param inputFile the file the tokens belong to
 * @param context   sensor context used to open the CPD token stream
 */
private static void saveCpdData(List<CpdToken> cpdTokens, InputFile inputFile, SensorContext context) {
  NewCpdTokens newCpdTokens = context.newCpdTokens().onFile(inputFile);
  for (CpdToken cpdToken : cpdTokens) {
    // Translate the syntax token's line/column span into a file range.
    newCpdTokens.addToken(
        inputFile.newRange(
            cpdToken.syntaxToken().line(),
            cpdToken.syntaxToken().column(),
            cpdToken.syntaxToken().endLine(),
            cpdToken.syntaxToken().endColumn()),
        cpdToken.image());
  }
  newCpdTokens.save();
}
// NOTE(review): fragment — truncated; the for-loop body over GroovyToken continues beyond
// this snippet. CPD tokens are only collected for non-test files (cpdTokens is null when
// isNotTest is false), while highlighting is always attached to the file.
NewCpdTokens cpdTokens = isNotTest ? context.newCpdTokens().onFile(inputFile) : null; NewHighlighting highlighting = context.newHighlighting().onFile(inputFile); for (GroovyToken groovyToken : tokens) {
/**
 * Pushes every collected {@code CpdToken} into a fresh CPD token stream on
 * {@code inputFile}, then saves the stream.
 *
 * @param cpdTokens tokens collected during analysis (syntax token position + image)
 * @param inputFile the file the tokens belong to
 * @param context   sensor context used to open the CPD token stream
 */
private static void saveCpdData(List<CpdToken> cpdTokens, InputFile inputFile, SensorContext context) {
  NewCpdTokens newCpdTokens = context.newCpdTokens().onFile(inputFile);
  for (CpdToken token : cpdTokens) {
    // Build the file range from the underlying syntax token's start/end coordinates.
    newCpdTokens.addToken(
        inputFile.newRange(
            token.syntaxToken().line(),
            token.syntaxToken().column(),
            token.syntaxToken().endLine(),
            token.syntaxToken().endColumn()),
        token.image());
  }
  newCpdTokens.save();
}
// NOTE(review): fragment — statements from inside a larger method (enclosing definition
// not visible here). Binds the highlighting builder and a new CPD token stream to the
// same input file; tokens are presumably added and saved further on — TODO confirm.
highlighting.onFile(inputFile); NewCpdTokens cpdTokens = context.newCpdTokens(); cpdTokens.onFile(inputFile);