# Natural Language Toolkit: Agreement Metrics
#
# Copyright (C) 2001-2012 NLTK Project
# Author: Tom Lippincott <tom@cs.columbia.edu>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
#
"""
Implementations of inter-annotator agreement coefficients surveyed by
Artstein and Poesio (2007), Inter-Coder Agreement for Computational
Linguistics.
An agreement coefficient calculates the amount that annotators agreed on label assignments beyond what is expected by chance.
In defining the AnnotationTask class, we use naming conventions similar to
the paper's terminology. There are three types of objects in an annotation
task:

    the coders (variables "c" and "C")
    the items to be annotated (variables "i" and "I")
    the potential categories to be assigned (variables "k" and "K")
Additionally, it is often the case that we don't want to treat two different labels as complete disagreement, and so the AnnotationTask constructor can also take a distance metric as a final argument. Distance metrics are simply functions that take two arguments, and return a value between 0.0 and 1.0 indicating the distance between them. If not supplied, the default is binary comparison between the arguments.
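For example (an illustrative sketch; interval_distance is one of the metrics
in nltk.metrics.distance, and my_data is a hypothetical list of labelings),
numeric labels could be compared by interval rather than binary distance:

    from nltk.metrics.distance import interval_distance
    task = AnnotationTask(my_data, interval_distance)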
The simplest way to initialize an AnnotationTask is with a list of triples,
each containing a coder's assignment for one object in the task:

    task = AnnotationTask(data=[('c1', '1', 'v1'), ('c2', '1', 'v1'), ...])
Alpha (Krippendorff 1980)
Kappa (Cohen 1960)
S (Bennet, Albert and Goldstein 1954)
Pi (Scott 1955)
TODO: Describe handling of multiple coders and missing data
Expected results from the Artstein and Poesio survey paper:
.. doctest::
    :options: +SKIP

    >>> from nltk.metrics.agreement import AnnotationTask
    >>> import os.path
    >>> t = AnnotationTask(data=[x.split() for x in open(os.path.join(os.path.dirname(__file__), "artstein_poesio_example.txt"))])
    >>> t.avg_Ao()
    0.88
    >>> t.pi()
    0.79953224189776151
    >>> t.S()
    0.81999999999999984
"""
"""Represents an annotation task, i.e. people assign labels to items.
    Notation tries to match notation in Artstein and Poesio (2007).

    In general, coders and items can be represented as any hashable object.
    Integers, for example, are fine, though strings are more readable. Labels
    must support the distance functions applied to them, so e.g. a
    string-edit-distance makes no sense if your labels are integers, whereas
    interval distance needs numeric values. A notable case of this is the
    MASI metric, which requires Python sets.
    """
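    # The MASI case mentioned above, as a sketch (masi_distance is provided by
    # nltk.metrics.distance; labels must then be sets or frozensets):
    #
    #     from nltk.metrics.distance import masi_distance
    #     task = AnnotationTask(
    #         [('c1', '1', frozenset(['v1', 'v2'])), ('c2', '1', frozenset(['v1']))],
    #         masi_distance)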
"""Initialize an empty annotation task.
"""
return "\r\n".join(map(lambda x:"%s\t%s\t%s" % (x['coder'], x['item'].replace('_', "\t"), ",".join(x['labels'])), self.data))
"""Load the results of annotation.
The argument is a list of 3-tuples, each representing a coder's labeling of an item: (coder,item,label) """
"""Agreement between two coders on a given item
""" cA, cB, i, ret) kA['labels'], kB['labels'], 1.0 - ret)
    def Nk(self, k):
        return float(sum(1 for x in self.data if x['labels'] == k))

    def Nik(self, i, k):
        return float(sum(1 for x in self.data if x['item'] == i and x['labels'] == k))

    def Nck(self, c, k):
        return float(sum(1 for x in self.data if x['coder'] == c and x['labels'] == k))
"""Implements the "n-notation" used in Artstein and Poesio (2007)
""" if k is not None and i is None and c is None: ret = self.Nk(k) elif k is not None and i is not None and c is None: ret = self.Nik(i, k) elif k is not None and c is not None and i is None: ret = self.Nck(c, k) else: raise ValueError("You must pass either i or c, not both! (k=%r,i=%r,c=%r)" % (k, i, c)) log.debug("Count on N[%s,%s,%s]: %d", k, i, c, ret) return ret
"""Observed agreement between two coders on all items.
"""
""" Calculates the average of function results for each coder pair """
"""Average observed agreement across all coders and items.
"""
"""The observed disagreement for the alpha coefficient.
The alpha coefficient, unlike the other metrics, uses this rather than observed agreement. """ total = 0.0 for i, itemdata in self._grouped_data('item'): label_freqs = FreqDist(x['labels'] for x in itemdata)
for j, nj in compat.iteritems(label_freqs): for l, nl in compat.iteritems(label_freqs): total += float(nj * nl) * self.distance(l, j) ret = (1.0 / float((len(self.I) * len(self.C) * (len(self.C) - 1)))) * total log.debug("Observed disagreement: %f", ret) return ret
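    # Restated (not a quotation from the survey): with n_ij the number of
    # coders who assigned label j to item i, and d a distance in [0, 1],
    #
    #     Do = 1 / (i * c * (c - 1)) * sum_I sum_{j,l in K} n_ij * n_il * d(j, l)
    #
    # the c * (c - 1) factor normalizes over ordered coder pairs per item.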
"""The observed disagreement for the weighted kappa coefficient.
""" total = 0.0 data = (x for x in self.data if x['coder'] in (cA, cB)) for i, itemdata in self._grouped_data('item', data): # we should have two items; distance doesn't care which comes first total += self.distance(itemdata.next()['labels'], itemdata.next()['labels'])
ret = total / (len(self.I) * max_distance) log.debug("Observed disagreement between %s and %s: %f", cA, cB, ret) return ret
"""Averaged over all labelers
""" ret = self._pairwise_average(lambda cA, cB: self.Do_Kw_pairwise(cA, cB, max_distance)) log.debug("Observed disagreement: %f", ret) return ret
    # Agreement Coefficients

    def S(self):
        """Bennett, Albert and Goldstein 1954

        """
        Ae = 1.0 / float(len(self.K))
        return (self.avg_Ao() - Ae) / (1.0 - Ae)
"""Scott 1955; here, multi-pi. Equivalent to K from Siegel and Castellan (1988).
"""
    def Ae_kappa(self, cA, cB):
        Ae = 0.0
        nitems = float(len(self.I))
        label_freqs = ConditionalFreqDist((x['labels'], x['coder']) for x in self.data)
        for k in label_freqs.conditions():
            Ae += (label_freqs[k][cA] / nitems) * (label_freqs[k][cB] / nitems)
        return Ae
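    # Restated (not a quotation from the survey): with each coder's label
    # distribution estimated from that coder's own assignments,
    #
    #     Ae = sum_k P(k | cA) * P(k | cB)
    #
    # i.e. the chance probability that both coders pick the same label.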
"""
""" Ae = self.Ae_kappa(cA, cB) ret = (self.Ao(cA, cB) - Ae) / (1.0 - Ae) log.debug("Expected agreement between %s and %s: %f", cA, cB, Ae) return ret
"""Cohen 1960 Averages naively over kappas for each coder pair.
""" return self._pairwise_average(self.kappa_pairwise)
"""Davies and Fleiss 1982 Averages over observed and expected agreements for each coder pair.
""" Ae = self._pairwise_average(self.Ae_kappa) return (self.avg_Ao() - Ae) / (1.0 - Ae)
"""Krippendorff 1980
""" De = 0.0
label_freqs = FreqDist(x['labels'] for x in self.data) for j in self.K: nj = label_freqs[j] for l in self.K: De += float(nj * label_freqs[l]) * self.distance(j, l) De = (1.0 / (len(self.I) * len(self.C) * (len(self.I) * len(self.C) - 1))) * De log.debug("Expected disagreement: %f", De) ret = 1.0 - (self.Do_alpha() / De) return ret
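    # A hand-checkable sketch (toy data, not from the survey): two coders agree
    # on item "1" and disagree on item "2" under the default binary distance,
    # giving Do = De = 0.5 and hence alpha = 0.0 (no agreement beyond chance):
    #
    #     t = AnnotationTask(data=[('c1', '1', 'a'), ('c2', '1', 'a'),
    #                              ('c1', '2', 'a'), ('c2', '2', 'b')])
    #     t.alpha()  # -> 0.0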
"""Cohen 1968
""" total = 0.0 label_freqs = ConditionalFreqDist((x['coder'], x['labels']) for x in self.data if x['coder'] in (cA, cB)) for j in self.K: for l in self.K: total += label_freqs[cA][j] * label_freqs[cB][l] * self.distance(j, l) De = total / (max_distance * pow(len(self.I), 2)) log.debug("Expected disagreement between %s and %s: %f", cA, cB, De) Do = self.Do_Kw_pairwise(cA, cB) ret = 1.0 - (Do / De) return ret
"""Cohen 1968
""" return self._pairwise_average(lambda cA, cB: self.weighted_kappa_pairwise(cA, cB, max_distance))
if __name__ == '__main__':

    import re
    import optparse
    from . import distance
    # process command-line arguments
    parser = optparse.OptionParser()
    parser.add_option("-d", "--distance", dest="distance", default="binary_distance",
                      help="distance metric to use")
    parser.add_option("-a", "--agreement", dest="agreement", default="kappa",
                      help="agreement coefficient to calculate")
    parser.add_option("-e", "--exclude", dest="exclude", action="append", default=[],
                      help="coder names to exclude (may be specified multiple times)")
    parser.add_option("-i", "--include", dest="include", action="append", default=[],
                      help="coder names to include, same format as exclude")
    parser.add_option("-f", "--file", dest="file",
                      help="file to read labelings from, each line with three columns: 'labeler item labels'")
    parser.add_option("-v", "--verbose", dest="verbose", default='0',
                      help="how much debugging to print on stderr (0-4)")
    parser.add_option("-c", "--columnsep", dest="columnsep", default="\t",
                      help="char/string that separates the three columns in the file, defaults to tab")
    parser.add_option("-l", "--labelsep", dest="labelsep", default=",",
                      help="char/string that separates labels (if labelers can assign more than one), defaults to comma")
    parser.add_option("-p", "--presence", dest="presence", default=None,
                      help="convert each labeling into 1 or 0, based on presence of LABEL")
    parser.add_option("-T", "--thorough", dest="thorough", default=False, action="store_true",
                      help="calculate agreement for every subset of the annotators")
    (options, remainder) = parser.parse_args()
    if not options.file:
        parser.print_help()
        exit()

    logging.basicConfig(level=50 - 10 * int(options.verbose))
    # read in data from the specified file
    data = []
    for l in open(options.file):
        toks = l.split(options.columnsep)
        coder, object, labels = (toks[0],
                                 str(toks[1:-1]),
                                 frozenset(toks[-1].strip().split(options.labelsep)))
        if ((options.include == options.exclude) or
                (len(options.include) > 0 and coder in options.include) or
                (len(options.exclude) > 0 and coder not in options.exclude)):
            data.append((coder, object, labels))
    if options.presence:
        task = AnnotationTask(data, getattr(distance, options.distance)(options.presence))
    else:
        task = AnnotationTask(data, getattr(distance, options.distance))
    if options.thorough:
        # --thorough advertises agreement for every annotator subset, but this
        # branch is not yet implemented
        pass
    else:
        print(getattr(task, options.agreement)())

    logging.shutdown()
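    # Example invocation (a sketch; labelings.txt is a hypothetical input file,
    # and -m is needed so the relative import of the distance module resolves):
    #
    #     python -m nltk.metrics.agreement -f labelings.txt -a alpha -v 1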