de.tudarmstadt.ukp.dkpro.statistics.agreement.coding

How to use de.tudarmstadt.ukp.dkpro.statistics.agreement.coding

Best Java code snippets using de.tudarmstadt.ukp.dkpro.statistics.agreement.coding (Showing top 20 results out of 315)

origin: de.tudarmstadt.ukp.dkpro.statistics/dkpro-statistics-agreement

/** Calculates the inter-rater agreement for the given annotation item. 
 *  This is the basic step that is performed for each item of an 
 *  annotation study, when calling {@link #calculateAgreement()}. 
 *  @throws NullPointerException if the given item is null. */
public double calculateItemAgreement(final ICodingAnnotationItem item) {
  return doCalculateItemAgreement(item) / item.getRaterCount();
}
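
A minimal usage sketch for the method above, assuming PercentageAgreement is the concrete measure exposing calculateItemAgreement (as the snippet suggests); the study data is purely illustrative.

// Illustrative only: two raters, two items, one full agreement.
CodingAnnotationStudy study = new CodingAnnotationStudy(2);
ICodingAnnotationItem item = study.addItem("A", "A");    // both raters chose "A"
study.addItem("A", "B");                                 // the raters disagree

PercentageAgreement pa = new PercentageAgreement(study);
double itemAgreement = pa.calculateItemAgreement(item);  // 1.0 for the first item
double overall = pa.calculateAgreement();                // 0.5 over both items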

origin: de.tudarmstadt.ukp.dkpro.statistics/dkpro-statistics-agreement

@Override
public double calculateObservedAgreement() {
  double result = 0.0;
  double denominator = 0.0;
  for (ICodingAnnotationItem item : study.getItems()) {
    int raterCount = item.getRaterCount();
    if (raterCount > 1) {
      result += doCalculateItemAgreement(item);
      denominator += raterCount;
    }
  }
  return result / denominator;
}
origin: de.tudarmstadt.ukp.dkpro.statistics/dkpro-statistics-agreement

/** Creates a new {@link CodingAnnotationItem} which has been coded with 
 *  the given annotation categories. Note that the order of the categories
 *  must correspond to the raters' indexes. Use null to represent missing
 *  annotations. Invoking <code>addItem("A", "B", null, "A")</code> 
 *  indicates an annotation item which has been coded as category "A"
 *  by raters 0 and 3 and as category "B" by rater 1. Rater 2 did not
 *  assign any category to the item. The method is a shorthand for 
 *  {@link #addItemAsArray(Object[])}. */
public ICodingAnnotationItem addItem(final Object... annotations) {
  return addItemAsArray(annotations);
}
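
A brief sketch of the Javadoc example above, showing how a missing annotation is encoded (the values are illustrative):

// Four raters; rater 2 (zero-based) did not annotate this item.
CodingAnnotationStudy study = new CodingAnnotationStudy(4);
study.addItem("A", "B", null, "A");  // raters 0 and 3: "A", rater 1: "B", rater 2: missing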
origin: de.tudarmstadt.ukp.dkpro.statistics/dkpro-statistics-agreement

@Override
public CodingAnnotationStudy clone() {
  CodingAnnotationStudy result = new CodingAnnotationStudy(getRaterCount());
  for (ICodingAnnotationItem item : getItems()) {
    CodingAnnotationItem newItem = new CodingAnnotationItem(raters.size());
    for (IAnnotationUnit unit : item.getUnits())
      newItem.addUnit(result.createUnit(result.items.size(), 
          unit.getRaterIdx(), unit.getCategory()));
    result.items.add(newItem);
  }
  for (Object category : getCategories())
    result.addCategory(category);
  return result;
}
 
origin: webanno/webanno

public int getNonNullCount(String aCasGroupId)
{
  int i = 0;
  for (ICodingAnnotationItem item : study.getItems()) {
    if (item.getUnit(casGroupIds.indexOf(aCasGroupId)).getCategory() != null) {
      i++;
    }
  }
  return i;
}
origin: webanno/webanno

public IAgreementMeasure make(ICodingAnnotationStudy aStudy)
{
  switch (this) {
  case COHEN_KAPPA_AGREEMENT:
    return new CohenKappaAgreement(aStudy);
  case FLEISS_KAPPA_AGREEMENT:
    return new FleissKappaAgreement(aStudy);
  case KRIPPENDORFF_ALPHA_NOMINAL_AGREEMENT:
    return new KrippendorffAlphaAgreement(aStudy, new NominalDistanceFunction());
  default:   
    throw new IllegalArgumentException();
  }
}
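
A hedged usage sketch: the enum enclosing this factory method is not shown in the snippet, so ConcreteAgreementMeasure below is only a placeholder for whatever WebAnno actually calls it.

// Placeholder enum name; only make(ICodingAnnotationStudy) is visible above.
CodingAnnotationStudy study = new CodingAnnotationStudy(2);
study.addItem("A", "A");
study.addItem("A", "B");
IAgreementMeasure measure =
    ConcreteAgreementMeasure.KRIPPENDORFF_ALPHA_NOMINAL_AGREEMENT.make(study);
double alpha = measure.calculateAgreement();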

origin: de.tudarmstadt.ukp.dkpro.statistics/dkpro-statistics-agreement

/** Calculates the expected inter-rater agreement that assumes a 
 *  uniform distribution over all raters and annotations. 
 *  @throws NullPointerException if the annotation study is null. 
 *  @throws ArithmeticException if there are no annotation categories. */
public double calculateExpectedAgreement() {
  return 1.0 / (double) study.getCategoryCount();
}
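
Worked example: under this uniform-distribution assumption, a study with four annotation categories has an expected chance agreement of 1 / 4 = 0.25, regardless of how often the raters actually used each category.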
 
origin: de.tudarmstadt.ukp.dkpro.statistics/dkpro-statistics-agreement

/** Computes the maximum possible value of the kappa coefficient for the 
 *  provided study. In case of balanced off-marginals (i.e., an equal
 *  disagreement for each pair of categories), the maximum kappa is 1.
 *  In other cases, it decreases as the distribution of disagreements
 *  becomes more uneven. */
public double calculateMaximumAgreement() {
  double A_O = calculateMaximumObservedAgreement();
  double A_E = calculateExpectedAgreement();
  if (A_E == 0.0)
    return A_O;
  else
    return (A_O - A_E) / (1.0 - A_E);
}
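
A hedged sketch of how the maximum kappa can be used to put an observed kappa into perspective, assuming the snippet belongs to CohenKappaAgreement and that study is a filled two-rater CodingAnnotationStudy as in the examples above:

// Illustrative: compare the observed kappa with the best kappa attainable
// under the study's marginal distributions.
CohenKappaAgreement kappa = new CohenKappaAgreement(study);
double observed = kappa.calculateAgreement();
double maximum  = kappa.calculateMaximumAgreement();
double share    = observed / maximum;  // fraction of the attainable kappa that was reached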
 
origin: de.tudarmstadt.ukp.dkpro.statistics/dkpro-statistics-agreement

/** Initializes the instance for the given annotation study. The study 
 *  may never be null. */
public ScottPiAgreement(final ICodingAnnotationStudy study) {
  super(study);
  ensureTwoRaters();
  warnIfMissingValues();
}
origin: de.tudarmstadt.ukp.dkpro.statistics/dkpro-statistics-agreement

/** Initializes the instance for the given annotation study. The study 
 *  may never be null. */
public CohenKappaAgreement(final ICodingAnnotationStudy study) {
  super(study);
  ensureTwoRaters();
  warnIfMissingValues();
}
origin: de.tudarmstadt.ukp.dkpro.statistics/dkpro-statistics-agreement

protected double doCalculateItemAgreement(final ICodingAnnotationItem item) {
  Map<Object, Integer> annotationsPerCategory 
      = CodingAnnotationStudy.countTotalAnnotationsPerCategory(item);
  double result = 0.0;
  for (Integer count : annotationsPerCategory.values())
    result += count * (count - 1);
  int raterCount = item.getRaterCount();
  if (raterCount <= 1)
    return 0.0;
  else 
    return result / (double) (raterCount - 1.0);
}
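
Worked example for the computation above: if three raters code an item as "A", "A", "B", the per-category counts are 2 and 1, so result = 2 * (2 - 1) + 1 * (1 - 1) = 2 and the method returns 2 / (3 - 1) = 1.0. calculateItemAgreement then divides by the rater count (3), giving 1/3, i.e. one agreeing rater pair out of three possible pairs.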
origin: de.tudarmstadt.ukp.dkpro.statistics/dkpro-statistics-agreement

@Override
public int getUnitCount() {
  int result = 0;
  for (ICodingAnnotationItem item : items)
    result += item.getRaterCount();
  return result;
  //return items.size() * raters.size();
}
 
origin: de.tudarmstadt.ukp.dkpro.statistics/dkpro-statistics-agreement

/** Initializes the instance for the given annotation study. The study 
 *  should never be null. */
public HubertKappaAgreement(final ICodingAnnotationStudy study) {
  super(study);
  warnIfMissingValues();
}
origin: de.tudarmstadt.ukp.dkpro.statistics/dkpro-statistics-agreement

/** Initializes the instance for the given annotation study. The study 
 *  may never be null. */
public BennettSAgreement(final ICodingAnnotationStudy study) {
  super(study);
  ensureTwoRaters();
}
 
origin: de.tudarmstadt.ukp.dkpro.statistics/dkpro-statistics-agreement

/** Initializes the instance for the given annotation study. The study 
 *  may never be null. */
public FleissKappaAgreement(final ICodingAnnotationStudy study) {
  super(study);
  warnIfMissingValues();
}
 
origin: de.tudarmstadt.ukp.dkpro.statistics/dkpro-statistics-agreement

/** Initializes an empty annotation study for a coding task with the given 
 *  number of raters. The basic setup of a coding study is assigning 
 *  categories to units with fixed boundaries. */
public CodingAnnotationStudy(int raterCount) {
  this();
  for (int raterIdx = 0; raterIdx < raterCount; raterIdx++)
    addRater(Integer.toString(raterIdx));
}
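
For example, new CodingAnnotationStudy(3) registers three raters named "0", "1", and "2", matching the rater indexes expected by addItem(Object...).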
origin: de.tudarmstadt.ukp.dkpro.statistics/dkpro-statistics-agreement

/** Print the coincidence matrix for the given coding study. */
public void print(final PrintStream out, final ICodingAnnotationStudy study) {
  Map<Object, Map<Object, Double>> coincidence = 
      CodingAnnotationStudy.countCategoryCoincidence(study);
  doPrint(out, study, coincidence);
}
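
A hedged usage sketch; the printer class enclosing this method is not named in the snippet, so CoincidenceMatrixPrinter below is an assumption about the surrounding class:

// Writes the category-by-category coincidence matrix of the study to stdout.
// The printer class name is assumed; only print(...) is shown above.
CoincidenceMatrixPrinter printer = new CoincidenceMatrixPrinter();
printer.print(System.out, study);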
 
origin: webanno/webanno

public boolean isAllNull(String aCasGroupId)
{
  for (ICodingAnnotationItem item : study.getItems()) {
    if (item.getUnit(casGroupIds.indexOf(aCasGroupId)).getCategory() != null) {
      return false;
    }
  }
  return true;
}

origin: de.tudarmstadt.ukp.dkpro.statistics/dkpro-statistics-agreement

/** Shorthand for invoking {@link #addItem(Object...)} with the same
 *  parameters multiple times. This method is useful for modeling 
 *  annotation data based on a contingency table. */
public void addMultipleItems(int times, final Object... values) {
  for (int i = 0; i < times; i++)
    addItemAsArray(values);
}
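
A sketch of the contingency-table use case mentioned above; the categories and counts are invented for illustration:

// Two raters, categories "A" and "B"; each call adds one cell of the table.
//                rater 1: A   rater 1: B
//  rater 0: A        20            5
//  rater 0: B         3           12
CodingAnnotationStudy study = new CodingAnnotationStudy(2);
study.addMultipleItems(20, "A", "A");
study.addMultipleItems(5,  "A", "B");
study.addMultipleItems(3,  "B", "A");
study.addMultipleItems(12, "B", "B");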
origin: de.tudarmstadt.ukp.dkpro.statistics/dkpro-statistics-agreement

/** Calculates the expected inter-rater agreement that assumes a 
 *  uniform distribution over all raters and annotations. 
 *  @throws NullPointerException if the annotation study is null. 
 *  @throws ArithmeticException if there are no annotation categories. */
@Override
public double calculateExpectedAgreement() {
  return 1.0 / (double) study.getCategoryCount();
}
 
de.tudarmstadt.ukp.dkpro.statistics.agreement.coding

Most used classes

  • ICodingAnnotationItem
  • ICodingAnnotationStudy
  • CodingAnnotationStudy
  • CohenKappaAgreement
  • FleissKappaAgreement
  • BennettSAgreement
  • CodingAgreementMeasure
  • CodingAnnotationItem
  • HubertKappaAgreement
  • PercentageAgreement
  • ScottPiAgreement
  • WeightedAgreement
  • WeightedKappaAgreement