/**
 * Convenience constructor for a pattern with a single (key, pattern) restriction.
 * Wraps the key and pattern into a Pair and delegates to the pair-based constructor.
 *
 * @param getter  function used to extract the value for a key from a matched object
 *                (NOTE(review): exact semantics are defined by the delegated-to constructor, not visible here)
 * @param key     the key whose value is matched
 * @param pattern the NodePattern the key's value must satisfy
 */
public ComplexNodePattern(BiFunction<M,K, Object> getter, K key, NodePattern pattern) { this(getter, Pair.makePair(key,pattern)); }
/**
 * Parses a textual monotonicity specification into a (direction, type) pair.
 * Recognized values: "nonmonotone", "additive", "multiplicative",
 * "additive-multiplicative", and the "anti-" variants of the last three.
 *
 * @param mono the monotonicity specification string
 * @return the monotonicity direction paired with its monotonicity type
 * @throws IllegalArgumentException if {@code mono} is not one of the recognized specifications
 */
public static Pair<Monotonicity, MonotonicityType> monoFromString(String mono) {
  if (mono.equals("nonmonotone")) {
    return Pair.makePair(Monotonicity.NONMONOTONE, MonotonicityType.NONE);
  } else if (mono.equals("additive")) {
    return Pair.makePair(Monotonicity.MONOTONE, MonotonicityType.ADDITIVE);
  } else if (mono.equals("multiplicative")) {
    return Pair.makePair(Monotonicity.MONOTONE, MonotonicityType.MULTIPLICATIVE);
  } else if (mono.equals("additive-multiplicative")) {
    return Pair.makePair(Monotonicity.MONOTONE, MonotonicityType.BOTH);
  } else if (mono.equals("anti-additive")) {
    return Pair.makePair(Monotonicity.ANTITONE, MonotonicityType.ADDITIVE);
  } else if (mono.equals("anti-multiplicative")) {
    return Pair.makePair(Monotonicity.ANTITONE, MonotonicityType.MULTIPLICATIVE);
  } else if (mono.equals("anti-additive-multiplicative")) {
    return Pair.makePair(Monotonicity.ANTITONE, MonotonicityType.BOTH);
  } else {
    throw new IllegalArgumentException("Unknown monotonicity: " + mono);
  }
}
/**
 * Registers an additional (key, pattern) restriction on this node pattern.
 *
 * @param c       the key to test
 * @param pattern the pattern the key's value must match
 */
public void add(K c, NodePattern pattern) { annotationPatterns.add(Pair.makePair(c, pattern)); }
/**
 * Effectively, merge two spans: returns the smallest span covering both
 * the existing span and the span to include.
 *
 * @param span      the current (start, end) span
 * @param toInclude the (start, end) span to fold into it
 * @return the merged span
 */
private static Pair<Integer, Integer> includeInSpan(Pair<Integer, Integer> span, Pair<Integer, Integer> toInclude) {
  int mergedStart = Math.min(span.first, toInclude.first);
  int mergedEnd = Math.max(span.second, toInclude.second);
  return Pair.makePair(mergedStart, mergedEnd);
}
public boolean isIncompatible(CorefCluster c1, CorefCluster c2) { // Was any of the pairs of mentions marked as incompatible int cid1 = Math.min(c1.clusterID, c2.clusterID); int cid2 = Math.max(c1.clusterID, c2.clusterID); return incompatibleClusters.contains(Pair.makePair(cid1,cid2)); }
/**
 * Returns whether this mention pair was previously marked as incompatible.
 */
public boolean isIncompatible(Mention m1, Mention m2) {
  // Mention-id pairs are stored normalized to (smaller, larger), so normalize before the lookup.
  int lowId = Math.min(m1.mentionID, m2.mentionID);
  int highId = Math.max(m1.mentionID, m2.mentionID);
  return incompatibles.contains(Pair.makePair(lowId, highId));
}
/**
 * Marks the two mentions — and their current coref clusters — as incompatible.
 * Both sets store id pairs normalized to (smaller, larger) so lookups are
 * order-independent.
 */
public void addIncompatible(Mention m1, Mention m2) {
  int lowMentionId = Math.min(m1.mentionID, m2.mentionID);
  int highMentionId = Math.max(m1.mentionID, m2.mentionID);
  incompatibles.add(Pair.makePair(lowMentionId, highMentionId));
  int lowClusterId = Math.min(m1.corefClusterID, m2.corefClusterID);
  int highClusterId = Math.max(m1.corefClusterID, m2.corefClusterID);
  incompatibleClusters.add(Pair.makePair(lowClusterId, highClusterId));
}
/**
 * Returns the vertices that are "leftmost, rightmost" among the descendants
 * of {@code startNode}. Note this requires that the IndexedFeatureLabels
 * present actually have ordering information (the TreeSet relies on
 * IndexedWord's natural ordering).
 *
 * TODO: can be done more efficiently?
 *
 * @param startNode the vertex whose descendants are examined
 * @param sg        the graph supplying the descendant set
 * @return pair of (leftmost, rightmost) descendant vertices
 */
public static Pair<IndexedWord, IndexedWord> leftRightMostChildVertices(IndexedWord startNode, SemanticGraph sg) {
  // Idiom fix: build the sorted set directly from the collection instead of
  // copying element-by-element in a manual loop.
  TreeSet<IndexedWord> vertices = new TreeSet<>(sg.descendants(startNode));
  return Pair.makePair(vertices.first(), vertices.last());
}
/**
 * Returns whether any non-pronominal mention in one cluster is an acronym of
 * a mention in the other. Results are memoized in {@code document.acronymCache},
 * keyed by the cluster-id pair normalized to (smaller, larger).
 *
 * @param document            document holding the acronym cache
 * @param mentionCluster      cluster whose mentions are tested as acronym candidates
 * @param potentialAntecedent cluster of potential antecedent mentions
 * @return true if some mention pair across the clusters is an acronym match
 */
public static boolean entityIsAcronym(Document document, CorefCluster mentionCluster, CorefCluster potentialAntecedent) {
  Pair<Integer, Integer> idPair = Pair.makePair(
      Math.min(mentionCluster.clusterID, potentialAntecedent.clusterID),
      Math.max(mentionCluster.clusterID, potentialAntecedent.clusterID));
  if (!document.acronymCache.containsKey(idPair)) {
    boolean isAcronym = false;
    // PERF FIX: the original kept scanning every remaining mention pair after a
    // match; one match decides the answer, so stop as soon as we find it.
    search:
    for (Mention m : mentionCluster.corefMentions) {
      if (m.isPronominal()) continue;
      for (Mention ant : potentialAntecedent.corefMentions) {
        if (isAcronym(m.originalSpan, ant.originalSpan)) {
          isAcronym = true;
          break search;
        }
      }
    }
    document.acronymCache.put(idPair, isAcronym);
  }
  return document.acronymCache.get(idPair);
}
/**
 * Computes a half-open index span over the tokens: the lower bound is the
 * smallest {@code toMin} value and the upper bound is one past the largest
 * {@code toMax} value. For an empty token list this returns
 * (Integer.MAX_VALUE, Integer.MIN_VALUE), matching the original sentinels.
 */
private static Pair<Integer, Integer> getSpan(List<CoreLabel> tokens, ToIntFunction<CoreLabel> toMin, ToIntFunction<CoreLabel> toMax) {
  int begin = Integer.MAX_VALUE;
  int end = Integer.MIN_VALUE;
  for (CoreLabel token : tokens) {
    int lo = toMin.applyAsInt(token);
    int hi = toMax.applyAsInt(token) + 1;
    if (lo < begin) {
      begin = lo;
    }
    if (hi > end) {
      end = hi;
    }
  }
  return Pair.makePair(begin, end);
}
/**
 * Returns the next (firstKey, secondKey) pair from the backing
 * two-dimensional map iterator; the entry's Boolean value is discarded.
 */
public Pair<K1, K2> next() { TwoDimensionalMap.Entry<K1, K2, Boolean> entry = backingIterator.next(); return Pair.makePair(entry.getFirstKey(), entry.getSecondKey()); }
/**
 * Maps a flattened parameter index into the binary transform region back to
 * the (left, right) category key pair of the matrix that owns it.
 *
 * @param pos 0-based index into the concatenated binary transform parameters
 * @return the owning matrix's key pair, or null if {@code pos} lies outside
 *         the binary transform region
 */
public Pair<String, String> indexToBinaryTransform(int pos) {
  if (pos < numBinaryMatrices * binaryTransformSize) {
    for (TwoDimensionalMap.Entry<String, String, SimpleMatrix> entry : binaryTransform) {
      // Each matrix owns binaryTransformSize consecutive indices, [0, size-1]
      // relative to the current entry.
      // BUG FIX: the original test was "binaryTransformSize < pos", which also
      // attributed the boundary index pos == binaryTransformSize to the current
      // matrix instead of the next one (off-by-one at every matrix boundary).
      if (binaryTransformSize <= pos) {
        pos -= binaryTransformSize;
      } else {
        return Pair.makePair(entry.getFirstKey(), entry.getSecondKey());
      }
    }
  }
  return null;
}
/**
 * {@inheritDoc}
 *
 * Reads one length-delimited protobuf {@code Document} from the stream and
 * converts it to an {@code Annotation}; the same stream is returned so the
 * caller can continue reading subsequent documents.
 *
 * NOTE(review): {@code parseDelimitedFrom} returns {@code null} at
 * end-of-stream, in which case {@code fromProto(null)} would likely fail —
 * confirm callers never read past the last document.
 */
@Override public Pair<Annotation, InputStream> read(InputStream is) throws IOException, ClassNotFoundException, ClassCastException { CoreNLPProtos.Document doc = CoreNLPProtos.Document.parseDelimitedFrom(is); return Pair.makePair(fromProto(doc), is); }
/**
 * Maps a flattened parameter index into the binary score region back to the
 * (left, right) category key pair of the score matrix that owns it. The
 * binary transform and unary transform regions precede the score region, so
 * their combined size is subtracted first.
 *
 * @param pos 0-based index into the full concatenated parameter vector
 * @return the owning score matrix's key pair, or null if {@code pos} lies
 *         outside the binary score region
 */
public Pair<String, String> indexToBinaryScore(int pos) {
  // Shift pos so it is relative to the start of the binary score region.
  pos -= (numBinaryMatrices * binaryTransformSize + numUnaryMatrices * unaryTransformSize);
  if (pos < numBinaryMatrices * binaryScoreSize && pos >= 0) {
    for (TwoDimensionalMap.Entry<String, String, SimpleMatrix> entry : binaryScore) {
      // Each score matrix owns binaryScoreSize consecutive indices, [0, size-1]
      // relative to the current entry.
      // BUG FIX: the original test was "binaryScoreSize < pos", which also
      // attributed the boundary index pos == binaryScoreSize to the current
      // matrix instead of the next one (off-by-one at every matrix boundary).
      if (binaryScoreSize <= pos) {
        pos -= binaryScoreSize;
      } else {
        return Pair.makePair(entry.getFirstKey(), entry.getSecondKey());
      }
    }
  }
  return null;
}
@Override public Pair<DeepTree, DeepTree> process(Tree tree) { // For each tree, move in the direction of the gold tree, and // move away from the direction of the best scoring hypothesis IdentityHashMap<Tree, SimpleMatrix> goldVectors = new IdentityHashMap<>(); double scoreGold = score(tree, goldVectors); DeepTree bestTree = getHighestScoringTree(tree, TRAIN_LAMBDA); DeepTree goldTree = new DeepTree(tree, goldVectors, scoreGold); return Pair.makePair(goldTree, bestTree); }
private static Pair<Logger, Redwood.Flag> getLoggerAndLevel(Object[] channel) { Pair<String, Redwood.Flag> pair = getSourceStringAndLevel(channel); // Get the logger for slf4j Logger impl = LoggerFactory.getLogger(pair.first()); return Pair.makePair(impl, pair.second()); }
/**
 * Parses a treebank description from the command line, discarding the weight
 * component of the weighted variant.
 *
 * @param args     the command-line arguments
 * @param argIndex index of the flag within {@code args}
 * @param flag     the flag introducing the treebank description
 * @return the treebank path paired with its file filter
 */
public static Pair<String, FileFilter> getTreebankDescription(String[] args, int argIndex, String flag) {
  // Delegate to the weighted parser, then drop the weight.
  Triple<String, FileFilter, Double> weighted = getWeightedTreebankDescription(args, argIndex, flag);
  return Pair.makePair(weighted.first(), weighted.second());
}
/**
 * Mostly just an alias for a {@code Function} from (state, action, resulting state)
 * transition triples to feature counts, but make sure our featurizer is serializable!
 * Implementations must also report whether a feature vector encodes a "simple split"
 * via {@link #isSimpleSplit}.
 */
public interface Featurizer extends Function<Triple<ClauseSplitterSearchProblem.State, ClauseSplitterSearchProblem.Action, ClauseSplitterSearchProblem.State>, Counter<String>>, Serializable { boolean isSimpleSplit(Counter<String> feats); }
private static Pair<IndexedWord, String> findDependentVerb(Mention m) { if (m.enhancedDependency.getRoots().size() == 0) { return new Pair<>(); } // would be nice to condense this pattern, but sadly =reln // always uses the last relation in the sequence, not the first SemgrexPattern pattern = SemgrexPattern.compile("{idx:" + (m.headIndex+1) + "} [ <=reln {tag:/^V.*/}=verb | <=reln ({} << {tag:/^V.*/}=verb) ]"); SemgrexMatcher matcher = pattern.matcher(m.enhancedDependency); while (matcher.find()) { return Pair.makePair(matcher.getNode("verb"), matcher.getRelnString("reln")); } return new Pair<>(); }
private static Pair<IndexedWord, String> findDependentVerb(Mention m) { if (m.dependency.getRoots().size() == 0) { return new Pair<>(); } // would be nice to condense this pattern, but sadly =reln // always uses the last relation in the sequence, not the first SemgrexPattern pattern = SemgrexPattern.compile("{idx:" + (m.headIndex+1) + "} [ <=reln {tag:/^V.*/}=verb | <=reln ({} << {tag:/^V.*/}=verb) ]"); SemgrexMatcher matcher = pattern.matcher(m.dependency); while (matcher.find()) { return Pair.makePair(matcher.getNode("verb"), matcher.getRelnString("reln")); } return new Pair<>(); }