Decision Tree - Basic Algorithm Implementation

# -*- coding: utf-8 -*-
from math import log
import operator

def createDataSet():
    dataSet = [[1, 1, 'yes'],
               [1, 1, 'yes'],
               [1, 0, 'no'],
               [0, 1, 'no'],
               [0, 1, 'no']]
    labels = ['no surfacing','flippers']
    return dataSet, labels

# Compute the Shannon entropy of a data set
def calcShannonEnt(dataSet):
    # Number of entries in the data set
    numEntries = len(dataSet)
    # Dictionary counting how often each class label appears
    labelCounts = {}
    for featVec in dataSet:
        # The class label is the last element of each entry
        currentLabel = featVec[-1]
        # If this label has not been seen yet, initialize its count to 0
        if currentLabel not in labelCounts:
            labelCounts[currentLabel] = 0
        # Increment the counter for this label
        labelCounts[currentLabel] += 1
    shannonEnt = 0.0
    # Sum -p * log2(p) over all class labels
    for key in labelCounts:
        # Probability of this label
        prob = float(labelCounts[key]) / numEntries
        shannonEnt -= prob * log(prob, 2)
    return shannonEnt

d, l = createDataSet()
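
A quick hand check (worked out from the data above, not in the original post): two of the five labels are 'yes' and three are 'no', so the entropy should be -(2/5)*log2(2/5) - (3/5)*log2(3/5) ≈ 0.971:

print(calcShannonEnt(d))   # 0.9709505944546686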

# Split the data set on a given feature
# dataSet: the data set to split
# axis: index of the feature to split on
# value: the feature value to match
def splitDataSet(dataSet, axis, value):
    retDataSet = []
    for featVec in dataSet:
        if featVec[axis] == value:
            # Copy the entry, dropping the feature used for the split
            reducedFeatVec = featVec[:axis]
            reducedFeatVec.extend(featVec[axis+1:])
            # Append the reduced entry to the new data set
            retDataSet.append(reducedFeatVec)
    return retDataSet
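
For example (output worked out by hand from the sample data): splitting on feature 0 with value 1 keeps the three entries whose first feature is 1 and strips that feature:

print(splitDataSet(d, 0, 1))   # [[1, 'yes'], [1, 'yes'], [0, 'no']]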


# Choose the best feature to split the data set on
def chooseBestFeatureToSplit(dataSet):
    # Number of features per entry, excluding the class label
    numFeatures = len(dataSet[0]) - 1
    # Entropy of the unsplit data set
    baseEntropy = calcShannonEnt(dataSet)
    # Best information gain seen so far
    bestInfoGain = 0.0
    # Index of the best splitting feature
    bestFeature = -1
    for i in range(numFeatures):
        # All values of feature i across the data set:
        # example is one entry of dataSet, example[i] its i-th feature,
        # so [example[i] for example in dataSet] collects feature i from every entry
        featList = [example[i] for example in dataSet]
        # Deduplicate with a set
        uniqueVals = set(featList)
        newEntropy = 0.0
        # Expected entropy after splitting on feature i
        for value in uniqueVals:
            subDataSet = splitDataSet(dataSet, i, value)
            prob = len(subDataSet) / float(len(dataSet))
            newEntropy += prob * calcShannonEnt(subDataSet)
        # Information gain of this split
        infoGain = baseEntropy - newEntropy
        # Keep the feature with the largest information gain
        if infoGain > bestInfoGain:
            bestInfoGain = infoGain
            bestFeature = i
    return bestFeature

print(chooseBestFeatureToSplit(d))
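
This prints 0. Working the numbers by hand (my check, not from the original post): splitting on 'no surfacing' (feature 0) leaves an expected entropy of 2/5 * 0 + 3/5 * 0.918 ≈ 0.551 for a gain of 0.971 - 0.551 ≈ 0.420, while splitting on 'flippers' (feature 1) leaves 1/5 * 0 + 4/5 * 1.0 = 0.8 for a gain of only ≈ 0.171, so feature 0 is chosen.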

# Return the class label that appears most often
def majorityCnt(classList):
    classCount = {}
    for vote in classList:
        if vote not in classCount:
            classCount[vote] = 0
        classCount[vote] += 1
    sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
    return sortedClassCount[0][0]
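
A quick illustration with a hypothetical class list ('no' appears twice, 'yes' once, so 'no' wins the vote):

print(majorityCnt(['yes', 'no', 'no']))   # 'no'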

# Build the decision tree recursively
def createTree(dataSet, labels):
    # Class labels: the last element of every entry in dataSet
    classList = [example[-1] for example in dataSet]
    # If all entries share one class, stop splitting
    if classList.count(classList[0]) == len(classList):
        return classList[0]
    # If all features have been used up, return the majority class label
    if len(dataSet[0]) == 1:
        return majorityCnt(classList)
    # Best feature to split on
    bestFeat = chooseBestFeatureToSplit(dataSet)
    # Label of that feature
    bestFeatLabel = labels[bestFeat]
    # One node of the tree
    myTree = {bestFeatLabel: {}}
    # Remove the used label so the recursion sees only the remaining features
    del(labels[bestFeat])
    # All values taken by the chosen feature
    featValues = [example[bestFeat] for example in dataSet]
    # Deduplicate
    uniqueVals = set(featValues)
    # Iterate over the feature values
    for value in uniqueVals:
        subLabels = labels[:]
        # Each branch is the subtree built recursively on the matching subset
        myTree[bestFeatLabel][value] = createTree(splitDataSet(dataSet, bestFeat, value), subLabels)
    return myTree

print(createTree(d, l))
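
For the sample data this should print the tree as a nested dict, with one key for the feature tested at each node and one sub-key per feature value (output worked out by hand):

{'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}}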

# Classify a test vector with the decision tree
# inputTree: the decision tree
# featLabels: feature labels of the test data
# testVec: feature values of the test data
def classify(inputTree, featLabels, testVec):
    # The root of the (sub)tree names the feature tested at this node
    firstStr = list(inputTree.keys())[0]
    secondDict = inputTree[firstStr]
    # Look up that feature's position in the test vector
    featIndex = featLabels.index(firstStr)
    key = testVec[featIndex]
    valueOfFeat = secondDict[key]
    # An inner dict is a subtree; anything else is a leaf holding the label
    if isinstance(valueOfFeat, dict):
        classLabel = classify(valueOfFeat, featLabels, testVec)
    else:
        classLabel = valueOfFeat
    return classLabel

dataSet, featLabels = createDataSet()
inputTree = createTree(dataSet, featLabels)
# createTree mutates the label list, so rebuild it before classifying
dataSet, featLabels = createDataSet()
testVec = [0, 1]
print(classify(inputTree, featLabels, testVec))
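
With testVec = [0, 1] the walk stops at the 'no surfacing' = 0 branch, which is a 'no' leaf, so this prints 'no'. As a second check (my addition, not in the original post), a vector satisfying both features should come out 'yes':

print(classify(inputTree, featLabels, [1, 1]))   # 'yes'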

# Building the tree is expensive, so save it to disk once it is built
# and load it back at test time instead of rebuilding it
def storeTree(inputTree, filename):
    import pickle
    # pickle writes bytes, so the file must be opened in binary mode
    fw = open(filename, 'wb')
    pickle.dump(inputTree, fw)
    fw.close()

# Read a decision tree back from a file
def grabTree(filename):
    import pickle
    fr = open(filename, 'rb')
    return pickle.load(fr)
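
A round-trip sanity check (my addition; the filename is arbitrary):

# The reloaded tree should compare equal to the original
storeTree(inputTree, 'classifierStorage.txt')
print(grabTree('classifierStorage.txt') == inputTree)   # True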

Reposted from blog.csdn.net/u012678352/article/details/55194876