版权声明:本文为博主原创文章,未经博主允许不得转载。 https://blog.csdn.net/Java_Man_China/article/details/88387905
import org.apache.spark.ml.feature.LabeledPoint
import org.apache.spark.ml.linalg.Vectors
import scala.collection.mutable.ArrayBuffer
/**
 * Boosting tree for regression, reproducing Example 8.2 from Li Hang's
 * "Statistical Learning Methods" (squared-error loss).
 *
 * Each round fits a depth-1 regression tree (a single split on the one
 * feature) to the current residuals; the final model is the sum of all
 * per-round step functions.
 *
 * @author XiaoTangBao
 * @date 2019/3/10 16:00
 * @version 1.0
 */
object BoostingTree {
  def main(args: Array[String]): Unit = {
    // Training data from the textbook example: (x, y) pairs.
    val arr = Array((1, 5.56), (2, 5.70), (3, 5.91), (4, 6.40), (5, 6.80),
      (6, 7.05), (7, 8.90), (8, 8.70), (9, 9.00), (10, 9.05))
    // Residual table; starts as the raw labels and is re-derived each round.
    var residuals = arr.map { case (x, y) => LabeledPoint(y, Vectors.dense(x)) }
    // Candidate split points: midpoints between consecutive x values,
    // which guarantees both sides of every split are non-empty.
    val cutpoints = arr.init.map(_._1 + 0.5)
    // Stop boosting once the squared-error loss drops to Li or below.
    val Li = 0.2
    // Safety cap so a non-improving fit cannot loop forever
    // (the original while(flag) loop had no such guard).
    val maxRounds = 1000
    // Per-round models; each is a single-split step function.
    val modelArr = ArrayBuffer[Double => Double]()
    var sle = Double.MaxValue
    while (sle > Li && modelArr.length < maxRounds) {
      val bestModel = getTree(residuals, cutpoints)
      modelArr.append(bestModel)
      // Rebuild the residual table for the next boosting round.
      residuals = residuals.map(p =>
        LabeledPoint(p.label - bestModel(p.features(0)), p.features))
      // Squared-error loss of the current additive model.
      sle = residuals.map(p => p.label * p.label).sum
    }
    // The final model is the sum of all per-round models.
    val finalModel = (x: Double) => modelArr.map(_(x)).sum
    // Evaluate on a few test inputs and print the predictions.
    val csdata = Array(1.4, 2.5, 2.8, 3.5, 4.0, 4.5, 5.6, 6.5, 6.7)
    for (cs <- csdata) println(finalModel(cs))
  }

  /**
   * Fits one depth-1 regression tree (a single split) to `data`.
   *
   * @param data      training points (label = current residual, one feature)
   * @param cutpoints candidate split positions
   * @return a step function: left mean c1 for x below the best cut, right
   *         mean c2 otherwise
   */
  def getTree(data: Array[LabeledPoint], cutpoints: Array[Double]) = {
    // Pick the cutpoint whose split minimizes the squared-error loss.
    // minBy is O(n) and returns the first minimum, matching the original
    // stable sortBy(...).take(1)(0) in O(n log n).
    val (cp, (_, c1, c2)) = cutpoints.map(c => (c, calms(c, data))).minBy(_._2._1)
    (x: Double) => if (x < cp) c1 else c2
  }

  /**
   * Evaluates one candidate split under squared-error loss:
   * f = min_c1 sum_{xi<=cut}(yi-c1)^2 + min_c2 sum_{xi>cut}(yi-c2)^2,
   * whose optimal c1/c2 are the per-side means.
   *
   * @param cutpoint split position
   * @param data     training points (one feature)
   * @return (loss, leftMean c1, rightMean c2)
   */
  def calms(cutpoint: Double, data: Array[LabeledPoint]) = {
    val (left, right) = data.partition(_.features(0) <= cutpoint)
    // Per-side means minimize the squared error on each side.
    // NOTE(review): assumes every cutpoint leaves both sides non-empty,
    // as midpoint cutpoints do; an empty side would yield NaN.
    val c1 = left.map(_.label).sum / left.length
    val c2 = right.map(_.label).sum / right.length
    val ms = left.map(p => math.pow(p.label - c1, 2)).sum +
      right.map(p => math.pow(p.label - c2, 2)).sum
    (ms, c1, c2)
  }
}
----------------------------------------------------------result---------------------------------------
5.63
5.818310185185186
5.818310185185186
6.551643518518518
6.551643518518518
6.819699074074074
6.819699074074074
8.950162037037037
8.950162037037037
该实验结果与《统计学习方法》例8.2中的提升树模型输出完全一致。