import os
from contextlib import redirect_stderr

import numpy as np

# Assumed mlens import paths; X, y and mape_scorer are module-level fixtures
# defined elsewhere in this test module.
from mlens.model_selection import Evaluator, benchmark
from mlens.utils.dummy import OLS, Scale


def test_bench_equality():
    """[Model Selection] Test benchmark correspondence with eval."""
    # Silence fit-time output written to stderr during evaluation.
    with open(os.devnull, 'w') as f, redirect_stderr(f):
        evl = Evaluator(mape_scorer, cv=5)
        evl.fit(X, y, estimators={'pr': [OLS()], 'no': [OLS()]},
                param_dicts={}, preprocessing={'pr': [Scale()], 'no': []})
        out = benchmark(X, y, mape_scorer, 5, {'pr': [OLS()], 'no': [OLS()]},
                        {'pr': [Scale()], 'no': []}, None)
    np.testing.assert_approx_equal(out['test_score-m']['no.ols'],
                                   evl.results['test_score-m']['no.ols'])