Coverage for nltk.classify.decisiontree: 60%
# Natural Language Toolkit: Decision Tree Classifiers
#
# Copyright (C) 2001-2012 NLTK Project
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
A classifier model that decides which label to assign to a token on the
basis of a tree structure, where branches correspond to conditions on
feature values, and leaves correspond to label assignments.
"""

from collections import defaultdict

from nltk.probability import FreqDist, MLEProbDist, entropy
from nltk.classify.api import ClassifierI


class DecisionTreeClassifier(ClassifierI):
""" :param label: The most likely label for tokens that reach this node in the decision tree. If this decision tree has no children, then this label will be assigned to any token that reaches this decision tree. :param feature_name: The name of the feature that this decision tree selects for. :param decisions: A dictionary mapping from feature values for the feature identified by ``feature_name`` to child decision trees. :param default: The child that will be used if the value of feature ``feature_name`` does not match any of the keys in ``decisions``. This is used when constructing binary decision trees. """
    def labels(self):
        labels = [self._label]
        if self._decisions is not None:
            for dt in self._decisions.values():
                labels.extend(dt.labels())
        if self._default is not None:
            labels.extend(self._default.labels())
        return list(set(labels))
    def classify(self, featureset):
        # Decision leaf:
        if self._fname is None:
            return self._label

        # Decision tree:
        fval = featureset.get(self._fname)
        if fval in self._decisions:
            return self._decisions[fval].classify(featureset)
        elif self._default is not None:
            return self._default.classify(featureset)
        else:
            return self._label

    def error(self, labeled_featuresets):
        errors = 0
        for featureset, label in labeled_featuresets:
            if self.classify(featureset) != label:
                errors += 1
        return errors / len(labeled_featuresets)
""" Return a string containing a pretty-printed version of this decision tree. Each line in this string corresponds to a single decision tree node or leaf, and indentation is used to display the structure of the decision tree. """ # [xx] display default!! n = width-len(prefix)-15 return '%s%s %s\n' % (prefix, '.'*n, self._label) n = width-len(prefix)-21 s += '%selse: %s %s\n' % (prefix, '.'*n, self._default._label) if self._default._fname is not None and depth>1: s += self._default.pp(width, prefix+' ', depth-1)
""" Return a string representation of this decision tree that expresses the decisions it makes as a nested set of pseudocode if statements. """ if self._fname is None: return "%sreturn %r\n" % (prefix, self._label) s = '' for (fval, result) in sorted(self._decisions.items()): s += '%sif %s == %r: ' % (prefix, self._fname, fval) if result._fname is not None and depth>1: s += '\n'+result.pseudocode(prefix+' ', depth-1) else: s += 'return %r\n' % result._label if self._default is not None: if len(self._decisions) == 1: s += '%sif %s != %r: '% (prefix, self._fname, self._decisions.keys()[0]) else: s += '%selse: ' % (prefix,) if self._default._fname is not None and depth>1: s += '\n'+self._default.pseudocode(prefix+' ', depth-1) else: s += 'return %r\n' % self._default._label return s
    @staticmethod
    def train(labeled_featuresets, entropy_cutoff=0.05, depth_cutoff=100,
              support_cutoff=10, binary=False, feature_values=None,
              verbose=False):
        """
        :param binary: If true, then treat all feature/value pairs as
            individual binary features, rather than using a single n-way
            branch for each feature.
        """
        # Collect a list of all feature names.
        feature_names = set()
        for featureset, label in labeled_featuresets:
            for fname in featureset:
                feature_names.add(fname)
        # Collect a list of the values each feature can take.
        if feature_values is None and binary:
            feature_values = defaultdict(set)
            for featureset, label in labeled_featuresets:
                for fname, fval in featureset.items():
                    feature_values[fname].add(fval)
        # Start with a stump.
        if not binary:
            tree = DecisionTreeClassifier.best_stump(
                feature_names, labeled_featuresets, verbose)
        else:
            tree = DecisionTreeClassifier.best_binary_stump(
                feature_names, labeled_featuresets, feature_values, verbose)
        # Refine the stump.
        tree.refine(labeled_featuresets, entropy_cutoff, depth_cutoff-1,
                    support_cutoff, binary, feature_values, verbose)

        # Return it
        return tree
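    # A minimal usage sketch (not part of the original module); the toy
    # weather featuresets below are invented for illustration:
    #
    #     toy_data = [({'outlook': 'sunny', 'windy': False}, 'no'),
    #                 ({'outlook': 'sunny', 'windy': True}, 'no'),
    #                 ({'outlook': 'overcast', 'windy': False}, 'yes'),
    #                 ({'outlook': 'overcast', 'windy': True}, 'yes')]
    #     tree = DecisionTreeClassifier.train(toy_data, support_cutoff=1)
    #     tree.classify({'outlook': 'overcast', 'windy': False})  # 'yes'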
    @staticmethod
    def leaf(labeled_featuresets):
        label = FreqDist([label for (featureset, label)
                          in labeled_featuresets]).max()
        return DecisionTreeClassifier(label)
    @staticmethod
    def stump(feature_name, labeled_featuresets):
        label = FreqDist([label for (featureset, label)
                          in labeled_featuresets]).max()

        # Find the best label for each value.
        freqs = defaultdict(FreqDist)   # freq(label|value)
        for featureset, label in labeled_featuresets:
            feature_value = featureset.get(feature_name)
            freqs[feature_value][label] += 1

        decisions = dict((val, DecisionTreeClassifier(freqs[val].max()))
                         for val in freqs)
        return DecisionTreeClassifier(label, feature_name, decisions)
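    # Worked illustration with invented data: two featuresets with
    # color=red labelled 'a', one with color=red labelled 'b', and one
    # with color=blue labelled 'b' give
    #
    #     freqs['red']   # FreqDist with a:2, b:1  -> max() == 'a'
    #     freqs['blue']  # FreqDist with b:1       -> max() == 'b'
    #
    # so the resulting stump routes red -> 'a' and blue -> 'b'.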
    def refine(self, labeled_featuresets, entropy_cutoff, depth_cutoff,
               support_cutoff, binary=False, feature_values=None,
               verbose=False):
        if len(labeled_featuresets) <= support_cutoff:
            return
        if self._fname is None:
            return
        if depth_cutoff <= 0:
            return
        for fval in self._decisions:
            fval_featuresets = [(featureset, label)
                                for (featureset, label)
                                in labeled_featuresets
                                if featureset.get(self._fname) == fval]
            label_freqs = FreqDist([label for (featureset, label)
                                    in fval_featuresets])
            if entropy(MLEProbDist(label_freqs)) > entropy_cutoff:
                self._decisions[fval] = DecisionTreeClassifier.train(
                    fval_featuresets, entropy_cutoff, depth_cutoff,
                    support_cutoff, binary, feature_values, verbose)
        if self._default is not None:
            default_featuresets = [(featureset, label)
                                   for (featureset, label)
                                   in labeled_featuresets
                                   if featureset.get(self._fname)
                                   not in self._decisions]
            label_freqs = FreqDist([label for (featureset, label)
                                    in default_featuresets])
            if entropy(MLEProbDist(label_freqs)) > entropy_cutoff:
                self._default = DecisionTreeClassifier.train(
                    default_featuresets, entropy_cutoff, depth_cutoff,
                    support_cutoff, binary, feature_values, verbose)
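    # For intuition (not from the source): the entropy of a node's
    # label distribution drives refinement.  A 50/50 split has entropy
    # 1.0 bit, far above the default cutoff of 0.05, so such a node is
    # split further; a pure node has entropy 0.0 and stays a leaf.
    #
    #     fd = FreqDist(['male'] * 5 + ['female'] * 5)
    #     entropy(MLEProbDist(fd))   # -> 1.0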
    @staticmethod
    def best_stump(feature_names, labeled_featuresets, verbose=False):
        best_stump = DecisionTreeClassifier.leaf(labeled_featuresets)
        best_error = best_stump.error(labeled_featuresets)
        for fname in feature_names:
            stump = DecisionTreeClassifier.stump(fname, labeled_featuresets)
            stump_error = stump.error(labeled_featuresets)
            if stump_error < best_error:
                best_error = stump_error
                best_stump = stump
        if verbose:
            print('best stump for %6d toks uses %-20s err=%6.4f' %
                  (len(labeled_featuresets), best_stump._fname, best_error))
        return best_stump
    @staticmethod
    def binary_stump(feature_name, feature_value, labeled_featuresets):
        label = FreqDist([label for (featureset, label)
                          in labeled_featuresets]).max()

        # Find the best label for each value.
        pos_fdist = FreqDist()
        neg_fdist = FreqDist()
        for featureset, label in labeled_featuresets:
            if featureset.get(feature_name) == feature_value:
                # FreqDist is a Counter subclass, so += 1 replaces the
                # old inc() method (removed in NLTK 3).
                pos_fdist[label] += 1
            else:
                neg_fdist[label] += 1

        decisions = {feature_value: DecisionTreeClassifier(pos_fdist.max())}
        default = DecisionTreeClassifier(neg_fdist.max())
        return DecisionTreeClassifier(label, feature_name, decisions, default)
    @staticmethod
    def best_binary_stump(feature_names, labeled_featuresets,
                          feature_values, verbose=False):
        best_stump = DecisionTreeClassifier.leaf(labeled_featuresets)
        best_error = best_stump.error(labeled_featuresets)
        for fname in feature_names:
            for fval in feature_values[fname]:
                stump = DecisionTreeClassifier.binary_stump(
                    fname, fval, labeled_featuresets)
                stump_error = stump.error(labeled_featuresets)
                if stump_error < best_error:
                    best_error = stump_error
                    best_stump = stump
        if best_stump._decisions:
            descr = '%s=%s' % (best_stump._fname,
                               list(best_stump._decisions.keys())[0])
        else:
            descr = '(default)'
        if verbose:
            print('best stump for %6d toks uses %-20s err=%6.4f' %
                  (len(labeled_featuresets), descr, best_error))
        return best_stump
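# Design note (an observation about the code above, not from the
# source): in binary mode the stump search scores one candidate per
# (feature, value) pair against the full training set, so selecting
# each node costs on the order of n_features * n_values * n_samples
# classifications; the entropy, depth, and support cutoffs used by
# train() and refine() are what keep the recursion tractable.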
##//////////////////////////////////////////////////////
##  Demo
##//////////////////////////////////////////////////////
def f(x):
    return DecisionTreeClassifier.train(x, binary=True, verbose=True)
def demo():
    from nltk.classify.util import names_demo, binary_names_demo_features
    classifier = names_demo(f,   # DecisionTreeClassifier.train,
                            binary_names_demo_features)
    print(classifier.pp(depth=7))
    print(classifier.pseudocode(depth=7))
if __name__ == '__main__':
    demo()