# Natural Language Toolkit: Decision Tree Classifiers
#
# Copyright (C) 2001-2012 NLTK Project
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT

"""
A classifier model that decides which label to assign to a token on
the basis of a tree structure, where branches correspond to conditions
on feature values, and leaves correspond to label assignments.
"""
from __future__ import print_function

from collections import defaultdict

from nltk.probability import FreqDist, MLEProbDist, entropy
from nltk.classify.api import ClassifierI

class DecisionTreeClassifier(ClassifierI):
    def __init__(self, label, feature_name=None, decisions=None, default=None):
        """
        :param label: The most likely label for tokens that reach
            this node in the decision tree.  If this decision tree
            has no children, then this label will be assigned to
            any token that reaches this decision tree.
        :param feature_name: The name of the feature that this
            decision tree selects for.
        :param decisions: A dictionary mapping from feature values
            for the feature identified by ``feature_name`` to
            child decision trees.
        :param default: The child that will be used if the value of
            feature ``feature_name`` does not match any of the keys in
            ``decisions``.  This is used when constructing binary
            decision trees.
        """
        self._label = label
        self._fname = feature_name
        self._decisions = decisions
        self._default = default
 
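    # Illustrative sketch (hypothetical feature name and labels, not part of
    # the original module): a hand-built two-node tree.  Tokens whose
    # 'last_letter' feature is 'a' are routed to the 'female' child; any
    # other value falls through to the default child.
    #
    #     leaf_f = DecisionTreeClassifier('female')
    #     leaf_m = DecisionTreeClassifier('male')
    #     tree = DecisionTreeClassifier('male', 'last_letter',
    #                                   decisions={'a': leaf_f},
    #                                   default=leaf_m)
    #     tree.classify({'last_letter': 'a'})   # -> 'female'
    #     tree.classify({'last_letter': 'k'})   # -> 'male'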

    def labels(self):
        labels = [self._label]
        if self._decisions is not None:
            for dt in self._decisions.values():
                labels.extend(dt.labels())
        if self._default is not None:
            labels.extend(self._default.labels())
        return list(set(labels))

    def classify(self, featureset):
        # Decision leaf:
        if self._fname is None:
            return self._label

        # Decision tree:
        fval = featureset.get(self._fname)
        if fval in self._decisions:
            return self._decisions[fval].classify(featureset)
        elif self._default is not None:
            return self._default.classify(featureset)
        else:
            return self._label
 
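    # Illustrative note on fall-through (hypothetical values, not from the
    # original module): when the observed value has no matching child and no
    # ``default`` child was given, classify() returns this node's own label.
    #
    #     node = DecisionTreeClassifier('male', 'last_letter',
    #                                   decisions={'a': DecisionTreeClassifier('female')})
    #     node.classify({'last_letter': 'z'})   # -> 'male'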

    def error(self, labeled_featuresets):
        errors = 0
        for featureset, label in labeled_featuresets:
            if self.classify(featureset) != label:
                errors += 1
        return float(errors)/len(labeled_featuresets)

    def pp(self, width=70, prefix='', depth=4):
        """
        Return a string containing a pretty-printed version of this
        decision tree.  Each line in this string corresponds to a
        single decision tree node or leaf, and indentation is used to
        display the structure of the decision tree.
        """
        # [xx] display default!!
        if self._fname is None:
            n = width-len(prefix)-15
            return '%s%s %s\n' % (prefix, '.'*n, self._label)
        s = ''
        for i, (fval, result) in enumerate(sorted(self._decisions.items())):
            hdr = '%s%s=%s? ' % (prefix, self._fname, fval)
            n = width-15-len(hdr)
            s += '%s%s %s\n' % (hdr, '.'*(n), result._label)
            if result._fname is not None and depth>1:
                s += result.pp(width, prefix+'  ', depth-1)
        if self._default is not None:
            n = width-len(prefix)-21
            s += '%selse: %s %s\n' % (prefix, '.'*n, self._default._label)
            if self._default._fname is not None and depth>1:
                s += self._default.pp(width, prefix+'  ', depth-1)
        return s
 

    def pseudocode(self, prefix='', depth=4):
        """
        Return a string representation of this decision tree that
        expresses the decisions it makes as a nested set of pseudocode
        if statements.
        """
        if self._fname is None:
            return "%sreturn %r\n" % (prefix, self._label)
        s = ''
        for (fval, result) in sorted(self._decisions.items()):
            s += '%sif %s == %r: ' % (prefix, self._fname, fval)
            if result._fname is not None and depth>1:
                s += '\n'+result.pseudocode(prefix+'  ', depth-1)
            else:
                s += 'return %r\n' % result._label
        if self._default is not None:
            if len(self._decisions) == 1:
                # A single decision means a binary split, so phrase the
                # default branch as the negated test rather than 'else'.
                s += '%sif %s != %r: ' % (prefix, self._fname,
                                          list(self._decisions.keys())[0])
            else:
                s += '%selse: ' % (prefix,)
            if self._default._fname is not None and depth>1:
                s += '\n'+self._default.pseudocode(prefix+'  ', depth-1)
            else:
                s += 'return %r\n' % self._default._label
        return s
 
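    # For the hypothetical two-node tree sketched above, pseudocode() yields
    # output along these lines (pp() renders the same structure with
    # indentation and dotted leaders):
    #
    #     if last_letter == 'a': return 'female'
    #     if last_letter != 'a': return 'male'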

    def __str__(self):
        return self.pp()
 

    @staticmethod
    def train(labeled_featuresets, entropy_cutoff=0.05, depth_cutoff=100,
              support_cutoff=10, binary=False, feature_values=None,
              verbose=False):
        """
        :param binary: If true, then treat all feature/value pairs as
            individual binary features, rather than using a single n-way
            branch for each feature.
        """
        # Collect a list of all feature names.
        feature_names = set()
        for featureset, label in labeled_featuresets:
            for fname in featureset:
                feature_names.add(fname)

        # Collect a list of the values each feature can take.
        if feature_values is None and binary:
            feature_values = defaultdict(set)
            for featureset, label in labeled_featuresets:
                for fname, fval in featureset.items():
                    feature_values[fname].add(fval)

        # Start with a stump.
        if not binary:
            tree = DecisionTreeClassifier.best_stump(
                feature_names, labeled_featuresets, verbose)
        else:
            tree = DecisionTreeClassifier.best_binary_stump(
                feature_names, labeled_featuresets, feature_values, verbose)

        # Refine the stump.
        tree.refine(labeled_featuresets, entropy_cutoff, depth_cutoff-1,
                    support_cutoff, binary, feature_values, verbose)

        # Return it
        return tree
 
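    # Usage sketch for the ``binary`` flag (``train_data`` is hypothetical,
    # not from the original module).  With binary=False each node asks one
    # n-way question such as "color=?", with one child per observed value;
    # with binary=True each node asks a yes/no question about a single
    # feature/value pair such as "color=red?", with one matching child and
    # one default child.
    #
    #     tree_nway   = DecisionTreeClassifier.train(train_data)
    #     tree_binary = DecisionTreeClassifier.train(train_data, binary=True)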

    @staticmethod
    def leaf(labeled_featuresets):
        label = FreqDist([label for (featureset,label)
                          in labeled_featuresets]).max()
        return DecisionTreeClassifier(label)

    @staticmethod
    def stump(feature_name, labeled_featuresets):
        label = FreqDist([label for (featureset,label)
                          in labeled_featuresets]).max()

        # Find the best label for each value.
        freqs = defaultdict(FreqDist) # freq(label|value)
        for featureset, label in labeled_featuresets:
            feature_value = featureset.get(feature_name)
            freqs[feature_value].inc(label)

        decisions = dict([(val, DecisionTreeClassifier(freqs[val].max()))
                          for val in freqs])
        return DecisionTreeClassifier(label, feature_name, decisions)
 

    def refine(self, labeled_featuresets, entropy_cutoff, depth_cutoff,
               support_cutoff, binary=False, feature_values=None,
               verbose=False):
        # Stop growing if there is too little support, if this node is
        # already a leaf, or if the maximum depth has been reached.
        if len(labeled_featuresets) <= support_cutoff: return
        if self._fname is None: return
        if depth_cutoff <= 0: return
        # Replace any child whose training subset is still impure (label
        # entropy above the cutoff) with a subtree trained on that subset.
        for fval in self._decisions:
            fval_featuresets = [(featureset,label) for (featureset,label)
                                in labeled_featuresets
                                if featureset.get(self._fname) == fval]

            label_freqs = FreqDist([label for (featureset,label)
                                    in fval_featuresets])
            if entropy(MLEProbDist(label_freqs)) > entropy_cutoff:
                self._decisions[fval] = DecisionTreeClassifier.train(
                    fval_featuresets, entropy_cutoff, depth_cutoff,
                    support_cutoff, binary, feature_values, verbose)
        if self._default is not None:
            # Do the same for the default branch, using the examples whose
            # feature value matched none of the explicit decisions.
            default_featuresets = [(featureset, label) for (featureset, label)
                                   in labeled_featuresets
                                   if featureset.get(self._fname) not in
                                   self._decisions]
            label_freqs = FreqDist([label for (featureset,label)
                                    in default_featuresets])
            if entropy(MLEProbDist(label_freqs)) > entropy_cutoff:
                self._default = DecisionTreeClassifier.train(
                    default_featuresets, entropy_cutoff, depth_cutoff,
                    support_cutoff, binary, feature_values, verbose)
 

    @staticmethod
    def best_stump(feature_names, labeled_featuresets, verbose=False):
        best_stump = DecisionTreeClassifier.leaf(labeled_featuresets)
        best_error = best_stump.error(labeled_featuresets)
        for fname in feature_names:
            stump = DecisionTreeClassifier.stump(fname, labeled_featuresets)
            stump_error = stump.error(labeled_featuresets)
            if stump_error < best_error:
                best_error = stump_error
                best_stump = stump
        if verbose:
            print('best stump for %6d toks uses %-20s err=%6.4f' %
                  (len(labeled_featuresets), best_stump._fname, best_error))
        return best_stump
 

    @staticmethod
    def binary_stump(feature_name, feature_value, labeled_featuresets):
        label = FreqDist([label for (featureset,label)
                          in labeled_featuresets]).max()

        # Find the best label for each branch (value matches vs. does not).
        pos_fdist = FreqDist()
        neg_fdist = FreqDist()
        for featureset, label in labeled_featuresets:
            if featureset.get(feature_name) == feature_value:
                pos_fdist.inc(label)
            else:
                neg_fdist.inc(label)

        # Fall back to the overall most frequent label if one branch
        # receives no training examples.
        pos_label = pos_fdist.max() if pos_fdist else label
        neg_label = neg_fdist.max() if neg_fdist else label
        decisions = {feature_value: DecisionTreeClassifier(pos_label)}
        default = DecisionTreeClassifier(neg_label)
        return DecisionTreeClassifier(label, feature_name, decisions, default)
 
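    # Illustrative sketch (hypothetical toy data, not from the original
    # module): a binary stump asks a single yes/no question.
    #
    #     stump = DecisionTreeClassifier.binary_stump(
    #         'color', 'red', [({'color': 'red'},   'stop'),
    #                          ({'color': 'green'}, 'go'),
    #                          ({'color': 'blue'},  'go')])
    #     print(stump.pseudocode())
    #     # if color == 'red': return 'stop'
    #     # if color != 'red': return 'go'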

    @staticmethod
    def best_binary_stump(feature_names, labeled_featuresets, feature_values,
                          verbose=False):
        best_stump = DecisionTreeClassifier.leaf(labeled_featuresets)
        best_error = best_stump.error(labeled_featuresets)
        for fname in feature_names:
            for fval in feature_values[fname]:
                stump = DecisionTreeClassifier.binary_stump(
                    fname, fval, labeled_featuresets)
                stump_error = stump.error(labeled_featuresets)
                if stump_error < best_error:
                    best_error = stump_error
                    best_stump = stump
        if best_stump._decisions:
            descr = '%s=%s' % (best_stump._fname,
                               list(best_stump._decisions.keys())[0])
        else:
            descr = '(default)'
        if verbose:
            print('best stump for %6d toks uses %-20s err=%6.4f' %
                  (len(labeled_featuresets), descr, best_error))
        return best_stump
 

##//////////////////////////////////////////////////////
##  Demo
##//////////////////////////////////////////////////////

def f(x):
    return DecisionTreeClassifier.train(x, binary=True, verbose=True)
 
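# A self-contained usage sketch on toy data (the helper and the data below
# are illustrative additions, not part of the original NLTK module).  Low
# cutoffs let the tiny dataset grow a full tree; call toy_demo() manually.
def toy_demo():
    toy_data = [({'size': 'big',   'color': 'red'},   'apple'),
                ({'size': 'small', 'color': 'red'},   'cherry'),
                ({'size': 'big',   'color': 'green'}, 'melon'),
                ({'size': 'small', 'color': 'green'}, 'grape')]
    tree = DecisionTreeClassifier.train(toy_data, entropy_cutoff=0.0,
                                        support_cutoff=0)
    print(tree.pp())
    print(tree.pseudocode())
    print(tree.classify({'size': 'small', 'color': 'red'}))  # expect 'cherry'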

def demo():
    from nltk.classify.util import names_demo, binary_names_demo_features
    classifier = names_demo(f, #DecisionTreeClassifier.train,
                            binary_names_demo_features)
    print(classifier.pp(depth=7))
    print(classifier.pseudocode(depth=7))

if __name__ == '__main__':
    demo()