Skip to content
Merged
4 changes: 4 additions & 0 deletions .github/workflows/test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -367,6 +367,10 @@ jobs:
python -m pip install --no-cache-dir build
make install-all-extras-for-test

- name: Remove lightgbm on macOS (native lib requires libomp, tests hang)
if: runner.os == 'macOS'
run: python -m pip uninstall -y lightgbm

- name: Show dependencies
run: python -m pip list

Expand Down
35 changes: 35 additions & 0 deletions docs/source/_snippets/user_guide/integrations.py
Original file line number Diff line number Diff line change
Expand Up @@ -226,6 +226,41 @@ def configure_optimizers(self):
best_params = optimizer.solve()
# [end:pytorch_lightning]

# [start:lightgbm_experiment]
from lightgbm import LGBMClassifier
from sklearn.datasets import load_iris

from hyperactive.experiment.integrations import LightGBMExperiment
from hyperactive.opt.gfo import BayesianOptimizer

# Load data
X, y = load_iris(return_X_y=True)

# Create the experiment: 3-fold cross-validation on the iris dataset;
# verbosity=-1 suppresses LightGBM's per-fit training logs
experiment = LightGBMExperiment(
    estimator=LGBMClassifier(verbosity=-1),
    X=X,
    y=y,
    cv=3,
)

# Define search space (max_depth=-1 means no depth limit in LightGBM)
search_space = {
    "n_estimators": [50, 100, 200],
    "max_depth": [3, 5, 7, -1],
    "learning_rate": [0.01, 0.05, 0.1, 0.2],
}

# Optimize: each of the 10 iterations scores one candidate via CV
optimizer = BayesianOptimizer(
    search_space=search_space,
    n_iter=10,
    experiment=experiment,
)
best_params = optimizer.solve()
print(f"Best parameters: {best_params}")
# [end:lightgbm_experiment]


# --- Runnable test code below ---
if __name__ == "__main__":
Expand Down
13 changes: 12 additions & 1 deletion docs/source/api_reference/experiments_integrations.rst
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ The :mod:`hyperactive.experiment.integrations` module contains experiment classe
for integration with machine learning frameworks.

These experiments provide seamless hyperparameter optimization for scikit-learn,
sktime, skpro, and PyTorch Lightning models.
sktime, skpro, PyTorch Lightning, and LightGBM models.

Scikit-Learn
------------
Expand Down Expand Up @@ -55,3 +55,14 @@ Experiments for PyTorch Lightning models.
:template: class.rst

TorchExperiment

LightGBM
--------

Cross-validation experiments for LightGBM estimators.

.. autosummary::
:toctree: auto_generated/
:template: class.rst

LightGBMExperiment
39 changes: 38 additions & 1 deletion docs/source/user_guide/integrations.rst
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ Framework Integrations
Hyperactive integrates with popular ML frameworks, providing drop-in replacements
for tools like ``GridSearchCV``. Each ML framework has its own conventions for training and evaluation. The integration
classes handle cross-validation setup, scoring metrics, and parameter translation, so
you can use any optimizer with scikit-learn, sktime, skpro, or PyTorch models.
you can use any optimizer with scikit-learn, sktime, skpro, PyTorch, or LightGBM models.

----

Expand Down Expand Up @@ -53,6 +53,15 @@ Supported Frameworks

Deep learning models

.. grid-item-card:: LightGBM
:class-card: sd-border-info
:link: #lightgbm-integration
:link-type: url

**LightGBMExperiment**

Gradient boosting models

----

Quick Reference
Expand Down Expand Up @@ -237,6 +246,34 @@ For deep learning hyperparameter optimization with PyTorch Lightning:

----

LightGBM Integration
--------------------

For gradient boosting hyperparameter optimization with LightGBM:

.. note::

Requires ``pip install lightgbm``

.. grid:: 1
:gutter: 0

.. grid-item::
:class: sd-bg-light sd-pt-3 sd-pb-1 sd-ps-3 sd-pe-3 sd-rounded-3

**Key Features**

- Optimize LightGBM classifiers and regressors
- LightGBM follows the sklearn API, so cross-validation works out of the box
- Supports all LightGBM hyperparameters (``n_estimators``, ``max_depth``, ``learning_rate``, etc.)

.. literalinclude:: ../_snippets/user_guide/integrations.py
:language: python
:start-after: # [start:lightgbm_experiment]
:end-before: # [end:lightgbm_experiment]

----

Tips
----

Expand Down
1 change: 1 addition & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -99,6 +99,7 @@ all_extras = [
"optuna<5",
"cmaes", # Required for CmaEsOptimizer (optuna's CMA-ES sampler)
"lightning",
"lightgbm",
]


Expand Down
48 changes: 48 additions & 0 deletions src/hyperactive/base/tests/test_endtoend.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,3 +47,51 @@ def test_endtoend_hillclimbing():
assert isinstance(best_params, dict), "Best parameters should be a dictionary"
assert "C" in best_params, "Best parameters should contain 'C'"
assert "gamma" in best_params, "Best parameters should contain 'gamma'"


def test_endtoend_lightgbm():
    """Test end-to-end usage of HillClimbing optimizer with LightGBM experiment."""
    from skbase.utils.dependencies import _check_soft_dependencies

    # Skip silently when the lightgbm soft dependency is not installed.
    if not _check_soft_dependencies("lightgbm", severity="none"):
        return None

    import numpy as np
    from lightgbm import LGBMClassifier
    from sklearn.datasets import load_iris

    from hyperactive.experiment.integrations import LightGBMExperiment
    from hyperactive.opt import HillClimbing

    # 1. define the experiment: small classifier, 2-fold CV on iris
    features, target = load_iris(return_X_y=True)
    experiment = LightGBMExperiment(
        estimator=LGBMClassifier(n_estimators=10, verbosity=-1),
        X=features,
        y=target,
        cv=2,
    )

    # 2. set up the HillClimbing optimizer over a small discrete grid
    optimizer = HillClimbing(
        search_space={
            "n_estimators": np.array([5, 10, 20]),
            "max_depth": np.array([2, 3, 5]),
        },
        n_iter=10,
        experiment=experiment,
    )

    # 3. run the HillClimbing optimizer
    optimizer.solve()

    # 4. check that the reported best parameters have the expected shape
    found = optimizer.best_params_
    assert found is not None, "Best parameters should not be None"
    assert isinstance(found, dict), "Best parameters should be a dictionary"
    assert (
        "n_estimators" in found
    ), "Best parameters should contain 'n_estimators'"
    assert "max_depth" in found, "Best parameters should contain 'max_depth'"
3 changes: 3 additions & 0 deletions src/hyperactive/experiment/integrations/__init__.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
"""Integrations with packages for tuning."""

# copyright: hyperactive developers, MIT License (see LICENSE file)

from hyperactive.experiment.integrations.lightgbm_experiment import LightGBMExperiment
from hyperactive.experiment.integrations.sklearn_cv import SklearnCvExperiment
from hyperactive.experiment.integrations.skpro_probareg import (
SkproProbaRegExperiment,
Expand All @@ -16,6 +18,7 @@
)

__all__ = [
"LightGBMExperiment",
"SklearnCvExperiment",
"SkproProbaRegExperiment",
"SktimeClassificationExperiment",
Expand Down
98 changes: 98 additions & 0 deletions src/hyperactive/experiment/integrations/lightgbm_experiment.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,98 @@
"""Experiment adapter for LightGBM cross-validation experiments."""

# copyright: hyperactive developers, MIT License (see LICENSE file)

from hyperactive.experiment.integrations.sklearn_cv import SklearnCvExperiment


class LightGBMExperiment(SklearnCvExperiment):
    """Experiment adapter for LightGBM cross-validation experiments.

    Thin wrapper around ``SklearnCvExperiment`` for LightGBM estimators.
    LightGBM's sklearn-compatible API (``LGBMClassifier``, ``LGBMRegressor``)
    works without adaptation. This class exists for discoverability, explicit
    soft-dependency tracking via the ``python_dependencies`` tag, and as an
    extension point for future LightGBM-specific behavior.

    Parameters
    ----------
    estimator : LGBMClassifier or LGBMRegressor
        The LightGBM estimator to evaluate. Any sklearn-compatible estimator
        is accepted, but LightGBM estimators are the intended use case.
    X : array-like, shape (n_samples, n_features)
        Input data.
    y : array-like, shape (n_samples,)
        Target values.
    scoring : callable or str, default=None
        Scoring function. Defaults follow ``SklearnCvExperiment`` conventions:
        ``accuracy_score`` for classifiers, ``mean_squared_error`` for
        regressors.
    cv : int or cross-validation generator, default=KFold(n_splits=3, shuffle=True)
        Cross-validation strategy.

    Notes
    -----
    LightGBM prints training logs to stdout by default. Pass
    ``verbosity=-1`` to the estimator constructor to suppress this output.

    For all remaining parameter details see ``SklearnCvExperiment``.

    Examples
    --------
    >>> from hyperactive.experiment.integrations import LightGBMExperiment
    >>> from lightgbm import LGBMClassifier
    >>> from sklearn.datasets import load_iris
    >>> X, y = load_iris(return_X_y=True)
    >>> exp = LightGBMExperiment(
    ...     estimator=LGBMClassifier(verbosity=-1),
    ...     X=X,
    ...     y=y,
    ... )
    >>> params = {"n_estimators": 50, "max_depth": 3}
    >>> score, metadata = exp.score(params)
    """

    _tags = {
        "authors": ["kajal-jotwani"],
        "python_dependencies": "lightgbm",
    }

    @classmethod
    def get_test_params(cls, parameter_set="default"):
        """Return testing parameter settings for the estimator.

        Parameters
        ----------
        parameter_set : str, default="default"
            Name of the set of test parameters to return. Currently unused;
            retained for skbase API compatibility.

        Returns
        -------
        list of dict
            Keyword-argument dicts for constructing test instances, covering
            one classification and one regression case. Empty list when the
            ``lightgbm`` soft dependency is not installed, so tests skip
            gracefully.
        """
        from skbase.utils.dependencies import _check_soft_dependencies

        # No lightgbm available -> no test instances can be built.
        if not _check_soft_dependencies("lightgbm", severity="none"):
            return []

        from lightgbm import LGBMClassifier, LGBMRegressor
        from sklearn.datasets import load_diabetes, load_iris

        # Classification case: small classifier on iris, 2-fold CV for speed.
        X_cls, y_cls = load_iris(return_X_y=True)
        params0 = {
            "estimator": LGBMClassifier(n_estimators=10, verbosity=-1),
            "X": X_cls,
            "y": y_cls,
            "cv": 2,
        }

        # Regression case: small regressor on diabetes, 2-fold CV.
        X_reg, y_reg = load_diabetes(return_X_y=True)
        params1 = {
            "estimator": LGBMRegressor(n_estimators=10, verbosity=-1),
            "X": X_reg,
            "y": y_reg,
            "cv": 2,
        }

        return [params0, params1]

    @classmethod
    def _get_score_params(cls):
        """Return parameter settings for score/evaluate tests.

        Returns
        -------
        list of dict
            One hyperparameter dict per test instance returned by
            ``get_test_params``. Empty list when ``lightgbm`` is not
            installed, mirroring ``get_test_params``.
        """
        from skbase.utils.dependencies import _check_soft_dependencies

        if not _check_soft_dependencies("lightgbm", severity="none"):
            return []

        # Return two independent dicts (the previous version aliased the
        # same object twice, so a caller mutating one entry would silently
        # change the other).
        return [
            {"n_estimators": 5, "max_depth": 2},
            {"n_estimators": 5, "max_depth": 2},
        ]
Loading