References:
API documentation:
sklearn.ensemble.GradientBoostingClassifier
sklearn.ensemble.GradientBoostingRegressor
sklearn.ensemble.HistGradientBoostingClassifier
Blog posts and implementations:
Mathematical derivation + pure Python implementation of machine learning algorithm 15: GBDT
Summary of the principle of gradient boosting tree (GBDT)
Summary of scikit-learn gradient boosting tree (GBDT) tuning parameters
Why does GBDT replace the residual with the negative gradient?
ls (least squares)
# Excerpt from sklearn/ensemble/_gb_losses.py (imports added for context)
import numpy as np
from sklearn.dummy import DummyRegressor

class LeastSquaresError(RegressionLossFunction):
    def init_estimator(self):
        # initial model: predict the (weighted) mean of y
        return DummyRegressor(strategy='mean')
    def __call__(self, y, raw_predictions, sample_weight=None):
        if sample_weight is None:
            return np.mean((y - raw_predictions.ravel()) ** 2)
        return (1 / sample_weight.sum() * np.sum(
            sample_weight * ((y - raw_predictions.ravel()) ** 2)))
    def negative_gradient(self, y, raw_predictions, **kargs):
        # the negative gradient is exactly the residual
        return y - raw_predictions.ravel()
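This directly answers the question linked above: for least squares, the negative gradient *is* the residual. A minimal sketch (toy values, not from sklearn) checking this with a finite-difference gradient:

import numpy as np

def ls_loss(y, f):
    return np.mean((y - f) ** 2)

y = np.array([3.0, -1.0, 2.5])   # toy targets
f = np.array([2.0, 0.0, 2.5])    # toy raw predictions
eps = 1e-6
num_grad = np.array([
    (ls_loss(y, f + eps * np.eye(3)[i]) -
     ls_loss(y, f - eps * np.eye(3)[i])) / (2 * eps)
    for i in range(3)])
# The negative numeric gradient equals 2/n * (y - f): the residual up to
# a positive constant, which is why GBDT fits trees to residuals for 'ls'.
print(np.allclose(-num_grad, 2 / 3 * (y - f)))   # True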
lad (least absolute deviation)
class LeastAbsoluteError(RegressionLossFunction):
    def init_estimator(self):
        # initial model: predict the (weighted) median of y
        return DummyRegressor(strategy='quantile', quantile=.5)
    def __call__(self, y, raw_predictions, sample_weight=None):
        if sample_weight is None:
            return np.abs(y - raw_predictions.ravel()).mean()
        return (1 / sample_weight.sum() * np.sum(
            sample_weight * np.abs(y - raw_predictions.ravel())))
    def negative_gradient(self, y, raw_predictions, **kargs):
        raw_predictions = raw_predictions.ravel()
        # sign of the residual: +1 where y > f, -1 otherwise
        return 2 * (y - raw_predictions > 0) - 1
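The return expression is just the sign of the residual: the boolean comparison maps True to 1 and False to 0, so `2 * (diff > 0) - 1` yields +1 for positive residuals and -1 otherwise. Note that exact ties get -1 where `np.sign` would give 0. A quick check with made-up values:

import numpy as np

diff = np.array([1.0, -1.0, 0.0])    # toy residuals y - f
print(2 * (diff > 0) - 1)            # [ 1 -1 -1]
print(np.sign(diff))                 # [ 1. -1.  0.] -- differs only at ties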
huber (Huber loss)
from sklearn.utils.stats import _weighted_percentile

class HuberLossFunction(RegressionLossFunction):
    def __init__(self, alpha=0.9):
        super().__init__()
        self.alpha = alpha
        self.gamma = None

    def init_estimator(self):
        return DummyRegressor(strategy='quantile', quantile=.5)

    def __call__(self, y, raw_predictions, sample_weight=None):
        raw_predictions = raw_predictions.ravel()
        diff = y - raw_predictions
        # gamma marks the switch from quadratic to linear loss; it is
        # set to the alpha-quantile of the absolute residuals
        gamma = self.gamma
        if gamma is None:
            if sample_weight is None:
                gamma = np.percentile(np.abs(diff), self.alpha * 100)
            else:
                gamma = _weighted_percentile(np.abs(diff), sample_weight,
                                             self.alpha * 100)
        gamma_mask = np.abs(diff) <= gamma
        if sample_weight is None:
            sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2)
            lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) -
                                       gamma / 2))
            loss = (sq_loss + lin_loss) / y.shape[0]
        else:
            sq_loss = np.sum(0.5 * sample_weight[gamma_mask] *
                             diff[gamma_mask] ** 2)
            lin_loss = np.sum(gamma * sample_weight[~gamma_mask] *
                              (np.abs(diff[~gamma_mask]) - gamma / 2))
            loss = (sq_loss + lin_loss) / sample_weight.sum()
        return loss

    def negative_gradient(self, y, raw_predictions, sample_weight=None,
                          **kargs):
        raw_predictions = raw_predictions.ravel()
        diff = y - raw_predictions
        if sample_weight is None:
            gamma = np.percentile(np.abs(diff), self.alpha * 100)
        else:
            gamma = _weighted_percentile(np.abs(diff), sample_weight,
                                         self.alpha * 100)
        gamma_mask = np.abs(diff) <= gamma
        # residual inside [-gamma, gamma], clipped to +/- gamma outside
        residual = np.zeros((y.shape[0],), dtype=np.float64)
        residual[gamma_mask] = diff[gamma_mask]
        residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask])
        self.gamma = gamma
        return residual
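So the Huber negative gradient is simply the residual clipped to [-gamma, gamma], where gamma is the alpha-quantile of the absolute residuals: outliers beyond gamma exert only a bounded pull on the next tree. A toy sketch of the equivalence (values are made up):

import numpy as np

alpha = 0.9
y = np.array([1.0, 2.0, 3.0, 100.0])   # one gross outlier
f = np.zeros(4)                        # toy predictions
diff = y - f

gamma = np.percentile(np.abs(diff), alpha * 100)
# same result as the gamma_mask assignments above
residual = np.clip(diff, -gamma, gamma)
print(gamma, residual)   # the outlier's influence is capped at gamma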
quantile (pinball loss)
class QuantileLossFunction(RegressionLossFunction):
    def __init__(self, alpha=0.9):
        super().__init__()
        self.alpha = alpha
        self.percentile = alpha * 100

    def init_estimator(self):
        # initial model: predict the alpha-quantile of y
        return DummyRegressor(strategy='quantile', quantile=self.alpha)

    def __call__(self, y, raw_predictions, sample_weight=None):
        raw_predictions = raw_predictions.ravel()
        diff = y - raw_predictions
        alpha = self.alpha
        mask = y > raw_predictions  # under-predicted samples
        if sample_weight is None:
            loss = (alpha * diff[mask].sum() -
                    (1 - alpha) * diff[~mask].sum()) / y.shape[0]
        else:
            loss = ((alpha * np.sum(sample_weight[mask] * diff[mask]) -
                    (1 - alpha) * np.sum(sample_weight[~mask] *
                                         diff[~mask])) / sample_weight.sum())
        return loss

    def negative_gradient(self, y, raw_predictions, **kargs):
        alpha = self.alpha
        raw_predictions = raw_predictions.ravel()
        mask = y > raw_predictions
        # alpha where y > f, -(1 - alpha) where y <= f
        return (alpha * mask) - ((1 - alpha) * ~mask)
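With alpha = 0.9, every under-predicted sample is pushed up with weight 0.9 while over-predicted samples are pushed down with only 0.1; the forces balance when roughly 90% of targets lie below the prediction, i.e. at the 0.9 quantile. A toy illustration (made-up values):

import numpy as np

alpha = 0.9
y = np.array([1.0, 5.0, 3.0])    # toy targets
f = np.array([2.0, 2.0, 3.0])    # toy predictions

mask = y > f
print(alpha * mask - (1 - alpha) * ~mask)   # [-0.1  0.9 -0.1]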
deviance (log loss for classification)
Binomial deviance
from scipy.special import expit
from sklearn.dummy import DummyClassifier

class BinomialDeviance(ClassificationLossFunction):
    def init_estimator(self):
        # initial model: predict the class prior
        return DummyClassifier(strategy='prior')
    def __call__(self, y, raw_predictions, sample_weight=None):
        # np.logaddexp(0, v) == log(1 + exp(v)), computed stably
        raw_predictions = raw_predictions.ravel()
        if sample_weight is None:
            return -2 * np.mean((y * raw_predictions) -
                                np.logaddexp(0, raw_predictions))
        return (-2 / sample_weight.sum() * np.sum(
            sample_weight * ((y * raw_predictions) -
                             np.logaddexp(0, raw_predictions))))
    def negative_gradient(self, y, raw_predictions, **kargs):
        # expit(v) = 1 / (1 + exp(-v)), the sigmoid
        return y - expit(raw_predictions.ravel())
Simplified, the per-sample loss is $-2\left[\,y \cdot \hat{y} - \log(1 + e^{\hat{y}})\,\right]$.
This loss function differs from the one in numpy-ml; this blog post explains it very thoroughly:
Scikit Binomial Deviance Loss Function
Deriving the negative gradient of this formula (differentiate with respect to $\hat{y}$ and negate):
$y - \frac{e^{\hat{y}}}{1 + e^{\hat{y}}} = y - \frac{1}{1 + e^{-\hat{y}}} = y - \sigma(\hat{y})$
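A quick numeric check of the derivation, comparing the class's negative gradient to the closed form $y - \sigma(\hat{y})$ (values are made up):

import numpy as np
from scipy.special import expit

y = np.array([1.0, 0.0, 1.0, 0.0])      # binary labels
raw = np.array([0.5, -1.2, 2.0, 0.3])   # toy raw log-odds scores

print(y - expit(raw))                 # negative gradient from the code above
print(y - 1 / (1 + np.exp(-raw)))     # same values via the derived formula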