Reading the sklearn GBDT source code


Example from the documentation:

Gradient Boosting regression

API documentation:

sklearn.ensemble.GradientBoostingClassifier

sklearn.ensemble.GradientBoostingRegressor

sklearn.ensemble.HistGradientBoostingClassifier

Blog posts and implementations from around the web:

Mathematical derivation + pure-Python implementation of machine learning algorithms (15): GBDT

Summary of the principles of gradient boosted decision trees (GBDT)

Overview of scikit-learn gradient boosted tree (GBDT) parameter tuning

Why is the residual in GBDT replaced by the negative gradient?


  • ls
class LeastSquaresError(RegressionLossFunction):
    def init_estimator(self):
        # initial model: predicts the (weighted) mean of y
        return DummyRegressor(strategy='mean')

    def __call__(self, y, raw_predictions, sample_weight=None):
        # weighted mean squared error
        return (1 / sample_weight.sum() * np.sum(
            sample_weight * ((y - raw_predictions.ravel()) ** 2)))

    def negative_gradient(self, y, raw_predictions, **kargs):
        # the negative gradient (up to a constant factor) is simply the residual
        return y - raw_predictions.ravel()
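The negative gradient of the squared error is just the residual, which is why GBDT with loss='ls' fits each new tree to the plain residuals. As a quick sanity check of my own (not part of the sklearn source), a finite-difference gradient of the mean squared error agrees with y - raw_predictions up to the constant 2/n factor that negative_gradient drops:

import numpy as np

y = np.array([3.0, -1.0, 2.5])
raw = np.array([2.0, 0.0, 2.0])
eps = 1e-6

def mse(raw_predictions):
    return np.mean((y - raw_predictions) ** 2)

# numerical negative gradient of the MSE w.r.t. each raw prediction
num_neg_grad = np.array([
    -(mse(raw + eps * np.eye(3)[i]) - mse(raw - eps * np.eye(3)[i])) / (2 * eps)
    for i in range(3)])
print(num_neg_grad * len(y) / 2)   # ~[ 1.  -1.   0.5]
print(y - raw)                     #  [ 1.  -1.   0.5]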
  • lad
class LeastAbsoluteError(RegressionLossFunction):
    def init_estimator(self):
        # initial model: predicts the median of y
        return DummyRegressor(strategy='quantile', quantile=.5)

    def __call__(self, y, raw_predictions, sample_weight=None):
        # weighted mean absolute error
        return (1 / sample_weight.sum() * np.sum(
            sample_weight * np.abs(y - raw_predictions.ravel())))

    def negative_gradient(self, y, raw_predictions, **kargs):
        raw_predictions = raw_predictions.ravel()
        # sign of the residual: 1 where y > prediction, -1 otherwise
        return 2 * (y - raw_predictions > 0) - 1
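For absolute error, the negative gradient keeps only the sign of the residual, so every sample pulls the next tree by the same unit amount. A tiny check of my own that the 2 * (diff > 0) - 1 expression matches np.sign except for an exact zero residual, which it sends to -1:

import numpy as np

diff = np.array([1.5, -0.3, 0.0, 2.0])
print(2 * (diff > 0) - 1)   # [ 1 -1 -1  1]
print(np.sign(diff))        # [ 1. -1.  0.  1.]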
  • huber
class HuberLossFunction(RegressionLossFunction):
    def __init__(self, alpha=0.9):
        super().__init__()
        # alpha controls where the loss switches from quadratic to linear;
        # gamma (the transition point) is re-estimated from the residuals
        self.alpha = alpha
        self.gamma = None

    def init_estimator(self):
        # initial model: predicts the median of y
        return DummyRegressor(strategy='quantile', quantile=.5)

    def __call__(self, y, raw_predictions, sample_weight=None):
        raw_predictions = raw_predictions.ravel()
        diff = y - raw_predictions
        gamma = self.gamma
        if gamma is None:
            # gamma is the alpha-percentile of the absolute residuals
            if sample_weight is None:
                gamma = np.percentile(np.abs(diff), self.alpha * 100)
            else:
                gamma = _weighted_percentile(np.abs(diff), sample_weight,
                                             self.alpha * 100)

        gamma_mask = np.abs(diff) <= gamma
        # quadratic loss for small residuals, linear loss for large ones
        sq_loss = np.sum(0.5 * sample_weight[gamma_mask] *
                         diff[gamma_mask] ** 2)
        lin_loss = np.sum(gamma * sample_weight[~gamma_mask] *
                          (np.abs(diff[~gamma_mask]) - gamma / 2))
        loss = (sq_loss + lin_loss) / sample_weight.sum()
        return loss

    def negative_gradient(self, y, raw_predictions, sample_weight=None,
                          **kargs):
        raw_predictions = raw_predictions.ravel()
        diff = y - raw_predictions
        if sample_weight is None:
            gamma = np.percentile(np.abs(diff), self.alpha * 100)
        else:
            gamma = _weighted_percentile(np.abs(diff), sample_weight,
                                         self.alpha * 100)
        gamma_mask = np.abs(diff) <= gamma
        # the negative gradient is the residual, clipped to [-gamma, gamma]
        residual = np.zeros((y.shape[0],), dtype=np.float64)
        residual[gamma_mask] = diff[gamma_mask]
        residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask])
        self.gamma = gamma
        return residual
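The Huber negative gradient therefore behaves like the squared-error residual inside the gamma band and like a gamma-scaled sign gradient outside it, which is equivalent to clipping the residual. A one-line illustration of my own (not from the sklearn source):

import numpy as np

diff = np.array([0.2, -0.5, 3.0, -4.0])
gamma = 1.0
print(np.clip(diff, -gamma, gamma))   # [ 0.2 -0.5  1.  -1. ]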
  • quantile
class QuantileLossFunction(RegressionLossFunction):
    def __init__(self, alpha=0.9):
        super().__init__()
        self.alpha = alpha
        self.percentile = alpha * 100

    def init_estimator(self):
        # initial model: predicts the alpha-quantile of y
        return DummyRegressor(strategy='quantile', quantile=self.alpha)

    def __call__(self, y, raw_predictions, sample_weight=None):
        raw_predictions = raw_predictions.ravel()
        diff = y - raw_predictions
        alpha = self.alpha

        # pinball loss: under-predictions weighted by alpha,
        # over-predictions weighted by (1 - alpha)
        mask = y > raw_predictions
        if sample_weight is None:
            loss = (alpha * diff[mask].sum() -
                    (1 - alpha) * diff[~mask].sum()) / y.shape[0]
        else:
            loss = ((alpha * np.sum(sample_weight[mask] * diff[mask]) -
                    (1 - alpha) * np.sum(sample_weight[~mask] *
                                         diff[~mask])) / sample_weight.sum())
        return loss

    def negative_gradient(self, y, raw_predictions, **kargs):
        alpha = self.alpha
        raw_predictions = raw_predictions.ravel()
        mask = y > raw_predictions
        # alpha where the model under-predicts, -(1 - alpha) otherwise
        return (alpha * mask) - ((1 - alpha) * ~mask)
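With the default alpha=0.9, an under-predicted sample therefore gets a gradient of +0.9 while an over-predicted one gets -0.1, so the fit is pulled toward the 0.9 quantile. A minimal check of my own:

import numpy as np

alpha = 0.9
y = np.array([2.0, 1.0, 3.0])
raw = np.array([1.0, 2.0, 3.0])
mask = y > raw
print((alpha * mask) - ((1 - alpha) * ~mask))   # [ 0.9 -0.1 -0.1]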
  • deviance

Binomial deviance

from scipy.special import expit, logsumexp

class BinomialDeviance(ClassificationLossFunction):
    def init_estimator(self):
        # initial model: predicts the class prior
        return DummyClassifier(strategy='prior')

    def __call__(self, y, raw_predictions, sample_weight=None):
        # -2 times the (weighted) average Bernoulli log-likelihood;
        # raw_predictions are the log-odds
        return (-2 / sample_weight.sum() * np.sum(
            sample_weight * ((y * raw_predictions) -
                             np.logaddexp(0, raw_predictions))))

    def negative_gradient(self, y, raw_predictions, **kargs):
        # residual on the probability scale: y - sigmoid(raw_predictions)
        return y - expit(raw_predictions.ravel())

Simplifying this expression, the per-sample loss is $-2\left[\, y \cdot \hat{y} - \log(1 + e^{\hat{y}}) \,\right]$.

Note that this loss function differs from the one in numpy-ml; the following post works through it very thoroughly:

Scikit binomial deviance loss function

Deriving the negative gradient of this expression (dropping the constant factor):

$y - \frac{e^{\hat{y}}}{1 + e^{\hat{y}}} = y - \frac{1}{1 + e^{-\hat{y}}} = y - \sigma(\hat{y})$
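As a sanity check of the derivation (my own sketch, not part of the sklearn source), a finite-difference negative gradient of the per-sample deviance, halved to remove the factor of 2, matches what negative_gradient returns:

import numpy as np
from scipy.special import expit

y, raw = 1.0, 0.3
eps = 1e-6

def deviance(raw_predictions):
    # per-sample binomial deviance: -2 * [y * raw - log(1 + e^raw)]
    return -2 * (y * raw_predictions - np.logaddexp(0, raw_predictions))

num_neg_grad = -(deviance(raw + eps) - deviance(raw - eps)) / (2 * eps)
print(num_neg_grad / 2)   # ~0.4256
print(y - expit(raw))     # 0.4255574...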


Reposted from: blog.csdn.net/TQCAI666/article/details/113252587