Custom Objectives

I’m working on writing my own custom objective for building trees with XGBoost. I implemented a callable in Python to calculate the gradient and Hessian and passed it into xgboost.train as the obj argument. However, I noticed that the objective parameter still seems to get used: if I pass objective='binary:logistic' along with a custom obj, the resulting trees change compared to when I don’t. I could not find documentation on what it’s doing. Even when I use a simple custom obj version of binary:logistic with no objective parameter specified, I get different trees. So, what is it doing when you set both obj and objective? Also, with a custom obj callable, is regularization still applied?

You should upgrade to the latest XGBoost, so that your custom objective receives the raw (un-transformed) prediction. Prior to the 1.1.0 release, the custom objective would receive the transformed prediction, which depended on the objective parameter.

See the demos in https://github.com/dmlc/xgboost/blob/master/demo/guide-python/custom_objective.py and https://github.com/dmlc/xgboost/blob/master/demo/guide-python/custom_softmax.py
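
For illustration, a minimal sketch (the names are mine, not from the demos) of a custom binary-logistic objective under the >= 1.1.0 behavior: the callable now receives the raw margin, so it applies the sigmoid itself before computing the gradient and Hessian. Before 1.1.0, with objective='binary:logistic', preds would already have arrived as probabilities.

import numpy as np
import xgboost as xgb

def logistic_obj(raw_preds, dtrain):
    # raw_preds are un-transformed margins in XGBoost >= 1.1.0
    labels = dtrain.get_label()
    probs = 1.0 / (1.0 + np.exp(-raw_preds))  # apply the sigmoid ourselves
    grad = probs - labels
    hess = probs * (1.0 - probs)
    return grad, hess

# hypothetical usage; dtrain would be an xgb.DMatrix built elsewhere
# bst = xgb.train({'max_depth': 3}, dtrain, num_boost_round=10, obj=logistic_obj)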

Does this change apply to the latest R release as well?

Yes, it applies to the R package as well.

My XGBoost version is 1.4.2. With this version I had the same question: I implemented a custom objective, but the objective parameter still seems to get used. Here is my code and its output.

My code:

import numpy as np
import xgboost as xgb
sample_num = 10
feature_num = 2

print(xgb.__version__)

np.random.seed(0)
data = np.random.randn(sample_num, feature_num)
np.random.seed(0)
label = np.random.randint(0, 2, sample_num)
train_data = xgb.DMatrix(data, label=label)
print(data)
print(label)



# objective binary:logistic
watchlist = [(train_data, 'logistic')]
params = {'max_depth': 3,
          'booster':'gbtree',                 
          'objective':'binary:logistic',
          'eval_metric': 'logloss'
          }
evals_result = {}
bst = xgb.train(params, train_data, num_boost_round=1, evals=watchlist, evals_result=evals_result, verbose_eval=False)
leaf=bst.predict(train_data,output_margin=True)
prob=bst.predict(train_data,output_margin=False)
prob_leaf=1-1/(1+np.exp(leaf))  # sigmoid of the raw margin
print(leaf)
print(prob)
print(prob_leaf)
print(evals_result)


# objective binary:logitraw
watchlist = [(train_data, 'logitraw')]
params = {'max_depth': 3,
          'booster':'gbtree',                   # weak learner
          'objective':'binary:logitraw',
          'eval_metric': 'logloss'
          }
evals_result = {}
bst = xgb.train(params, train_data, num_boost_round=1, evals=watchlist, evals_result=evals_result, verbose_eval=False)
leaf=bst.predict(train_data,output_margin=True)
prob=bst.predict(train_data,output_margin=False)
prob_leaf=1-1/(1+np.exp(leaf))
print(leaf)
print(prob)
print(prob_leaf)
print(evals_result)

# objective binary:hinge
watchlist = [(train_data, 'hinge')]
params = {'max_depth': 3,
          'booster':'gbtree',                   # weak learner
          'objective':'binary:hinge',
          'eval_metric': 'logloss'
          }
evals_result = {}
bst = xgb.train(params, train_data, num_boost_round=1, evals=watchlist, evals_result=evals_result, verbose_eval=False)
leaf=bst.predict(train_data,output_margin=True)
prob=bst.predict(train_data,output_margin=False)
prob_leaf=1-1/(1+np.exp(leaf))
print(leaf)
print(prob)
print(prob_leaf)
print(evals_result)


# custom objective callable
def logregobj(preds, dtrain):
    labels = dtrain.get_label()
    preds = 1.0 / (1.0 + np.exp(-preds))  # transform raw leaf weight
    grad = preds - labels
    hess = preds * (1.0 - preds)
    return grad, hess

# binary:logistic
params = {'max_depth': 3,
          'booster':'gbtree',                   # weak learner
          'objective':'binary:logistic',
          'eval_metric': 'logloss'
          }
evals_result = {}
watchlist = [(train_data, 'obj_logistic')]
bst = xgb.train(params, train_data, num_boost_round=1, obj=logregobj, evals=watchlist, evals_result=evals_result, verbose_eval=False)
leaf=bst.predict(train_data,output_margin=True)
prob=bst.predict(train_data,output_margin=False)
prob_leaf=1-1/(1+np.exp(leaf))
print(leaf)
print(prob)
print(prob_leaf)
print(evals_result)


# binary:logitraw
params = {'max_depth': 3,
          'booster':'gbtree',                   # weak learner
          'objective':'binary:logitraw',
          'eval_metric': 'logloss'
          }

evals_result = {}
watchlist = [(train_data, 'obj_logitraw')]
bst = xgb.train(params, train_data, num_boost_round=1, obj=logregobj, evals=watchlist, evals_result=evals_result, verbose_eval=False)
leaf=bst.predict(train_data,output_margin=True)
prob=bst.predict(train_data,output_margin=False)
prob_leaf=1-1/(1+np.exp(leaf))
print(leaf)
print(prob)
print(prob_leaf)
print(evals_result)

# binary:hinge
params = {'max_depth': 3,
          'booster':'gbtree',                   # weak learner
          'objective':'binary:hinge',
          'eval_metric': 'logloss'
          }
evals_result = {}
watchlist = [(train_data, 'obj_hinge')]
bst = xgb.train(params, train_data, num_boost_round=1, obj=logregobj, evals=watchlist, evals_result=evals_result, verbose_eval=False)
leaf=bst.predict(train_data,output_margin=True)
prob=bst.predict(train_data,output_margin=False)
prob_leaf=1-1/(1+np.exp(leaf))
print(leaf)
print(prob)
print(prob_leaf)
print(evals_result)

Outputs:

[[ 1.76405235  0.40015721]
 [ 0.97873798  2.2408932 ]
 [ 1.86755799 -0.97727788]
 [ 0.95008842 -0.15135721]
 [-0.10321885  0.4105985 ]
 [ 0.14404357  1.45427351]
 [ 0.76103773  0.12167502]
 [ 0.44386323  0.33367433]
 [ 1.49407907 -0.20515826]
 [ 0.3130677  -0.85409574]]
[0 1 1 0 1 1 1 1 1 1]
[0.06666667 0.06666667 0.06666667 0.06666667 0.33333337 0.33333337
 0.33333337 0.33333337 0.06666667 0.33333337]
[0.51666045 0.51666045 0.51666045 0.51666045 0.5825702  0.5825702
 0.5825702  0.5825702  0.51666045 0.5825702 ]
[0.5166605 0.5166605 0.5166605 0.5166605 0.5825702 0.5825702 0.5825702
 0.5825702 0.5166605 0.5825702]
{'logistic': OrderedDict([('logloss', [0.613671])])}
[0.48451093 0.48451093 0.48451093 0.48451093 0.7603707  0.7603707
 0.7603707  0.7603707  0.48451093 0.7603707 ]
[0.48451093 0.48451093 0.48451093 0.48451093 0.7603707  0.7603707
 0.7603707  0.7603707  0.48451093 0.7603707 ]
[0.6188125 0.6188125 0.6188125 0.6188125 0.6814342 0.6814342 0.6814342
 0.6814342 0.6188125 0.6814342]
{'logitraw': OrderedDict([('logloss', [0.486887])])}
[0.29999998 0.65       0.70000005 0.29999998 0.75       0.75
 0.75       0.75       0.70000005 0.75      ]
[1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]
[0.5744425  0.65701044 0.66818774 0.5744425  0.6791787  0.6791787
 0.6791787  0.6791787  0.66818774 0.6791787 ]
{'hinge': OrderedDict([('logloss', [7.368272])])}
[0.06666667 0.06666667 0.06666667 0.06666667 0.33333337 0.33333337
 0.33333337 0.33333337 0.06666667 0.33333337]
[0.51666045 0.51666045 0.51666045 0.51666045 0.5825702  0.5825702
 0.5825702  0.5825702  0.51666045 0.5825702 ]
[0.5166605 0.5166605 0.5166605 0.5166605 0.5825702 0.5825702 0.5825702
 0.5825702 0.5166605 0.5825702]
{'obj_logistic': OrderedDict([('logloss', [0.613671])])}
[0.48451093 0.48451093 0.48451093 0.48451093 0.7603707  0.7603707
 0.7603707  0.7603707  0.48451093 0.7603707 ]
[0.48451093 0.48451093 0.48451093 0.48451093 0.7603707  0.7603707
 0.7603707  0.7603707  0.48451093 0.7603707 ]
[0.6188125 0.6188125 0.6188125 0.6188125 0.6814342 0.6814342 0.6814342
 0.6814342 0.6188125 0.6814342]
{'obj_logitraw': OrderedDict([('logloss', [0.486887])])}
[0.48451093 0.48451093 0.48451093 0.48451093 0.7603707  0.7603707
 0.7603707  0.7603707  0.48451093 0.7603707 ]
[1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]
[0.6188125 0.6188125 0.6188125 0.6188125 0.6814342 0.6814342 0.6814342
 0.6814342 0.6188125 0.6814342]
{'obj_hinge': OrderedDict([('logloss', [7.368272])])}

I updated my XGBoost to 1.4.2, but I find it does not work.

I just installed XGBoost 1.4.2 and it works for me.

Python code:

import numpy as np
import xgboost as xgb
sample_num = 10
feature_num = 2

print(xgb.__version__)

np.random.seed(0)
data = np.random.randn(sample_num, feature_num)
np.random.seed(0)
label = np.random.randint(0, 2, sample_num)
train_data = xgb.DMatrix(data, label=label)
print(data)
print(label)


# custom objective callable
def logregobj(preds, dtrain):
    labels = dtrain.get_label()
    preds = 1.0 / (1.0 + np.exp(-preds))  # transform raw leaf weight
    grad = preds - labels
    hess = preds * (1.0 - preds)
    return grad, hess

for objective in ['binary:logistic', 'binary:logitraw', 'binary:hinge']:
    print(f'====objective = {objective}====')
    params = {'max_depth': 3,
              'booster':'gbtree',                   # weak learner
              'objective':'binary:logistic',
              'eval_metric': 'logloss'
              }
    evals_result = {}
    watchlist = [(train_data, 'obj_logistic')]
    bst = xgb.train(params, train_data, num_boost_round=2, obj=logregobj, evals=watchlist, evals_result=evals_result, verbose_eval=False)
    leaf=bst.predict(train_data, output_margin=True)
    prob=bst.predict(train_data, output_margin=False)
    prob_leaf=1-1/(1+np.exp(leaf))
    print(leaf)
    print(prob)
    print(prob_leaf)
    print(evals_result)

Output:

1.4.2
[[ 1.76405235  0.40015721]
 [ 0.97873798  2.2408932 ]
 [ 1.86755799 -0.97727788]
 [ 0.95008842 -0.15135721]
 [-0.10321885  0.4105985 ]
 [ 0.14404357  1.45427351]
 [ 0.76103773  0.12167502]
 [ 0.44386323  0.33367433]
 [ 1.49407907 -0.20515826]
 [ 0.3130677  -0.85409574]]
[0 1 1 0 1 1 1 1 1 1]
====objective = binary:logistic====
[0.12226062 0.12226062 0.12226062 0.12226062 0.615901   0.615901
 0.615901   0.615901   0.12226062 0.615901  ]
[0.5305271  0.5305271  0.5305271  0.5305271  0.64928573 0.64928573
 0.64928573 0.64928573 0.5305271  0.64928573]
[0.5305271 0.5305271 0.5305271 0.5305271 0.6492857 0.6492857 0.6492857
 0.6492857 0.5305271 0.6492857]
{'obj_logistic': OrderedDict([('logloss', [0.613671, 0.557335])])}
====objective = binary:logitraw====
[0.12226062 0.12226062 0.12226062 0.12226062 0.615901   0.615901
 0.615901   0.615901   0.12226062 0.615901  ]
[0.5305271  0.5305271  0.5305271  0.5305271  0.64928573 0.64928573
 0.64928573 0.64928573 0.5305271  0.64928573]
[0.5305271 0.5305271 0.5305271 0.5305271 0.6492857 0.6492857 0.6492857
 0.6492857 0.5305271 0.6492857]
{'obj_logistic': OrderedDict([('logloss', [0.613671, 0.557335])])}
====objective = binary:hinge====
[0.12226062 0.12226062 0.12226062 0.12226062 0.615901   0.615901
 0.615901   0.615901   0.12226062 0.615901  ]
[0.5305271  0.5305271  0.5305271  0.5305271  0.64928573 0.64928573
 0.64928573 0.64928573 0.5305271  0.64928573]
[0.5305271 0.5305271 0.5305271 0.5305271 0.6492857 0.6492857 0.6492857
 0.6492857 0.5305271 0.6492857]
{'obj_logistic': OrderedDict([('logloss', [0.613671, 0.557335])])}

Yes, but if you change 'objective':'binary:logistic' to the loop variable objective, you will find the difference. What’s more, I found the reason: different objectives handle base_score differently.


import numpy as np
import xgboost as xgb
sample_num = 10
feature_num = 2

print(xgb.__version__)

np.random.seed(0)
data = np.random.randn(sample_num, feature_num)
np.random.seed(0)
label = np.random.randint(0, 2, sample_num)
train_data = xgb.DMatrix(data, label=label)
print(data)
print(label)


# custom objective callable
def logregobj(preds, dtrain):
    labels = dtrain.get_label()
    preds = 1.0 / (1.0 + np.exp(-preds))  # transform raw leaf weight
    grad = preds - labels
    hess = preds * (1.0 - preds)
    return grad, hess

for objective in ['binary:logistic', 'binary:logitraw', 'binary:hinge']:
    print(f'====objective = {objective}====')
    params = {'max_depth': 3,
              'booster':'gbtree',                   # weak learner
              'objective': objective,
              'eval_metric': 'logloss'
              }
    evals_result = {}
    watchlist = [(train_data, 'obj_logistic')]
    bst = xgb.train(params, train_data, num_boost_round=2, obj=logregobj, evals=watchlist, evals_result=evals_result, verbose_eval=False)
    leaf=bst.predict(train_data, output_margin=True)
    prob=bst.predict(train_data, output_margin=False)
    prob_leaf=1-1/(1+np.exp(leaf))
    print(leaf)
    print(prob)
    print(prob_leaf)
    print(evals_result)
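
If my understanding is correct (I have not checked the source, so treat this as an assumption), the difference comes from how each objective converts base_score into the initial margin: binary:logistic passes base_score through the logit (so the default 0.5 becomes a margin of 0), while binary:logitraw and binary:hinge appear to use base_score as the margin directly. A minimal sketch to test that, reusing train_data and logregobj from the script above and picking base_score values that should all map to a starting margin of 0:

import xgboost as xgb

# assumed mapping: values chosen so every objective starts from the same margin (0)
base_scores = {'binary:logistic': 0.5,   # logit(0.5) == 0
               'binary:logitraw': 0.0,   # used as the margin directly (assumption)
               'binary:hinge': 0.0}      # used as the margin directly (assumption)

for objective, base_score in base_scores.items():
    params = {'max_depth': 3,
              'objective': objective,
              'base_score': base_score,
              'eval_metric': 'logloss'}
    bst = xgb.train(params, train_data, num_boost_round=1, obj=logregobj)
    # if the hypothesis holds, these raw margins should now agree across objectives
    print(objective, bst.predict(train_data, output_margin=True))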

Lastly, thank you very much.