52 changes: 39 additions & 13 deletions docs/source/tutorials/ar.ipynb
@@ -187,10 +187,16 @@
 "    max_prediction_length=prediction_length,\n",
 ")\n",
 "\n",
-"validation = TimeSeriesDataSet.from_dataset(training, data, min_prediction_idx=training_cutoff + 1)\n",
+"validation = TimeSeriesDataSet.from_dataset(\n",
+"    training, data, min_prediction_idx=training_cutoff + 1\n",
+")\n",
 "batch_size = 128\n",
-"train_dataloader = training.to_dataloader(train=True, batch_size=batch_size, num_workers=0)\n",
-"val_dataloader = validation.to_dataloader(train=False, batch_size=batch_size, num_workers=0)"
+"train_dataloader = training.to_dataloader(\n",
+"    train=True, batch_size=batch_size, num_workers=0\n",
+")\n",
+"val_dataloader = validation.to_dataloader(\n",
+"    train=False, batch_size=batch_size, num_workers=0\n",
+")"
 ]
 },
 {
@@ -251,7 +257,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 6,
+"execution_count": null,
 "metadata": {},
 "outputs": [
 {
@@ -269,12 +275,18 @@
 "source": [
 "pl.seed_everything(42)\n",
 "trainer = pl.Trainer(accelerator=\"auto\", gradient_clip_val=0.1)\n",
-"net = NBeats.from_dataset(training, learning_rate=3e-2, weight_decay=1e-2, widths=[32, 512], backcast_loss_ratio=0.1)"
+"net = NBeats.from_dataset(\n",
+"    training,\n",
+"    learning_rate=3e-2,\n",
+"    weight_decay=1e-2,\n",
+"    widths=[32, 512],\n",
+"    backcast_loss_ratio=0.1,\n",
+")"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 7,
+"execution_count": null,
 "metadata": {},
 "outputs": [
 {
@@ -321,9 +333,18 @@
 ],
 "source": [
 "# find optimal learning rate\n",
-"from lightning.pytorch.tuner import Tuner\n",
+"# from lightning.pytorch.tuner import Tuner\n",
+"# todo: update when lightning.pytorch.tuner allows weights_only param\n",
+"from pytorch_forecasting.models.temporal_fusion_transformer.tuning import (\n",
+"    _NewTuner as Tuner,\n",
+")\n",
 "\n",
-"res = Tuner(trainer).lr_find(net, train_dataloaders=train_dataloader, val_dataloaders=val_dataloader, min_lr=1e-5)\n",
+"res = Tuner(trainer).lr_find(\n",
+"    net,\n",
+"    train_dataloaders=train_dataloader,\n",
+"    val_dataloaders=val_dataloader,\n",
+"    min_lr=1e-5,\n",
+")\n",
 "print(f\"suggested learning rate: {res.suggestion()}\")\n",
 "fig = res.plot(show=True, suggest=True)\n",
 "fig.show()\n",
@@ -340,7 +361,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 14,
+"execution_count": null,
 "metadata": {},
 "outputs": [
 {
@@ -443,7 +464,9 @@
 }
 ],
 "source": [
-"early_stop_callback = EarlyStopping(monitor=\"val_loss\", min_delta=1e-4, patience=10, verbose=False, mode=\"min\")\n",
+"early_stop_callback = EarlyStopping(\n",
+"    monitor=\"val_loss\", min_delta=1e-4, patience=10, verbose=False, mode=\"min\"\n",
+")\n",
 "trainer = pl.Trainer(\n",
 "    max_epochs=3,\n",
 "    accelerator=\"auto\",\n",
@@ -468,6 +491,7 @@
 "    net,\n",
 "    train_dataloaders=train_dataloader,\n",
 "    val_dataloaders=val_dataloader,\n",
+"    weights_only=False,\n",
 ")"
 ]
 },
@@ -481,12 +505,12 @@
 },
 {
 "cell_type": "code",
-"execution_count": 15,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
 "best_model_path = trainer.checkpoint_callback.best_model_path\n",
-"best_model = NBeats.load_from_checkpoint(best_model_path)"
+"best_model = NBeats.load_from_checkpoint(best_model_path, weights_only=False)"
 ]
 },
 {
@@ -645,7 +669,9 @@
 ],
 "source": [
 "for idx in range(10): # plot 10 examples\n",
-"    best_model.plot_prediction(raw_predictions.x, raw_predictions.output, idx=idx, add_loss_to_title=True)"
+"    best_model.plot_prediction(\n",
+"        raw_predictions.x, raw_predictions.output, idx=idx, add_loss_to_title=True\n",
+"    )"
 ]
 },
 {
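Why every notebook gains `weights_only=False`: PyTorch 2.6 changed the default of `torch.load` to `weights_only=True`, and a Lightning checkpoint stores more than tensors (epoch counters, callback state, and the pickled `hyper_parameters` that pytorch-forecasting models need to rebuild themselves), so the restricted unpickler rejects it. A minimal sketch of the failure the patch works around, assuming `best_model_path` points at a trusted checkpoint produced by the training cells above:

```python
import torch

# Under PyTorch >= 2.6 the default weights_only=True raises an
# UnpicklingError here, because "hyper_parameters" holds arbitrary objects:
# ckpt = torch.load(best_model_path)
# Opting out restores the old behaviour; only do this for files you trust.
ckpt = torch.load(best_model_path, map_location="cpu", weights_only=False)
print(sorted(ckpt))  # e.g. ['epoch', 'hyper_parameters', 'state_dict', ...]
```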
16 changes: 10 additions & 6 deletions docs/source/tutorials/deepar.ipynb
@@ -301,7 +301,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 8,
+"execution_count": null,
 "metadata": {},
 "outputs": [
 {
@@ -348,7 +348,11 @@
 ],
 "source": [
 "# find optimal learning rate\n",
-"from lightning.pytorch.tuner import Tuner\n",
+"# from lightning.pytorch.tuner import Tuner\n",
+"# todo: update when lightning.pytorch.tuner allows weights_only param\n",
+"from pytorch_forecasting.models.temporal_fusion_transformer.tuning import (\n",
+"    _NewTuner as Tuner,\n",
+")\n",
 "\n",
 "res = Tuner(trainer).lr_find(\n",
 "    net,\n",
@@ -883,12 +887,12 @@
 },
 {
 "cell_type": "code",
-"execution_count": 10,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
 "best_model_path = trainer.checkpoint_callback.best_model_path\n",
-"best_model = DeepAR.load_from_checkpoint(best_model_path)"
+"best_model = DeepAR.load_from_checkpoint(best_model_path, weights_only=False)"
 ]
 },
 {
@@ -1268,7 +1272,7 @@
 ],
 "metadata": {
 "kernelspec": {
-"display_name": ".venv",
+"display_name": ".venv (3.12.3)",
 "language": "python",
 "name": "python3"
 },
@@ -1282,7 +1286,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.10.9"
+"version": "3.12.3"
 }
 },
 "nbformat": 4,
12 changes: 8 additions & 4 deletions docs/source/tutorials/nhits.ipynb
@@ -306,7 +306,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 7,
+"execution_count": null,
 "metadata": {},
 "outputs": [
 {
@@ -353,7 +353,11 @@
 ],
 "source": [
 "# find optimal learning rate\n",
-"from lightning.pytorch.tuner import Tuner\n",
+"# from lightning.pytorch.tuner import Tuner\n",
+"# todo: update when lightning.pytorch.tuner allows weights_only param\n",
+"from pytorch_forecasting.models.temporal_fusion_transformer.tuning import (\n",
+"    _NewTuner as Tuner,\n",
+")\n",
 "\n",
 "res = Tuner(trainer).lr_find(\n",
 "    net,\n",
@@ -553,12 +557,12 @@
 },
 {
 "cell_type": "code",
-"execution_count": 9,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
 "best_model_path = trainer.checkpoint_callback.best_model_path\n",
-"best_model = NHiTS.load_from_checkpoint(best_model_path)"
+"best_model = NHiTS.load_from_checkpoint(best_model_path, weights_only=False)"
 ]
 },
 {
14 changes: 10 additions & 4 deletions docs/source/tutorials/stallion.ipynb
@@ -1012,7 +1012,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 8,
+"execution_count": null,
 "metadata": {
 "collapsed": false,
 "jupyter": {
@@ -1067,7 +1067,11 @@
 ],
 "source": [
 "# find optimal learning rate\n",
-"from lightning.pytorch.tuner import Tuner\n",
+"# from lightning.pytorch.tuner import Tuner\n",
+"# todo: update when lightning.pytorch.tuner allows weights_only param\n",
+"from pytorch_forecasting.models.temporal_fusion_transformer.tuning import (\n",
+"    _NewTuner as Tuner,\n",
+")\n",
 "\n",
 "res = Tuner(trainer).lr_find(\n",
 "    tft,\n",
@@ -2051,14 +2055,16 @@
 },
 {
 "cell_type": "code",
-"execution_count": 11,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
 "# load the best model according to the validation loss\n",
 "# (given that we use early stopping, this is not necessarily the last epoch)\n",
 "best_model_path = trainer.checkpoint_callback.best_model_path\n",
-"best_tft = TemporalFusionTransformer.load_from_checkpoint(best_model_path)"
+"best_tft = TemporalFusionTransformer.load_from_checkpoint(\n",
+"    best_model_path, weights_only=False\n",
+")"
 ]
 },
 {
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -28,7 +28,7 @@ description = "Forecasting timeseries with PyTorch - dataloaders, normalizers, m
 dependencies = [
 "numpy<=3.0.0",
 "torch >=2.0.0,!=2.0.1,<3.0.0",
-"lightning >=2.0.0,<2.6.0",
+"lightning >=2.0.0,<3.0.0",
 "scipy >=1.8,<2.0",
 "pandas >=1.3.0,<3.0.0",
 "scikit-learn >=1.2,<2.0",
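The relaxed `lightning` pin is the point of this PR: the `<2.6.0` cap existed because lightning >= 2.6 forwards the new `weights_only=True` default of `torch.load` during checkpoint restore; with the tuner workaround below and the explicit `weights_only=False` arguments above, the cap can widen to `<3.0.0`. A rough sketch of the version gate involved (hypothetical helper name; the PR itself gates on skbase's `_check_soft_dependencies("lightning<2.6", severity="none")`):

```python
from importlib.metadata import version

from packaging.version import Version


def needs_weights_only_patch() -> bool:
    # lightning < 2.6 predates the weights_only default flip, so the
    # load_checkpoint shim is only needed on newer versions
    return Version(version("lightning")) >= Version("2.6")
```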
26 changes: 25 additions & 1 deletion pytorch_forecasting/models/temporal_fusion_transformer/tuning.py
@@ -3,16 +3,20 @@
 """

 import copy
+import functools
 import logging
 import os
 from typing import Any, Union

 import lightning.pytorch as pl
 from lightning.pytorch.callbacks import LearningRateMonitor, ModelCheckpoint
 from lightning.pytorch.loggers import TensorBoardLogger
 from lightning.pytorch.trainer import Trainer
 from lightning.pytorch.tuner import Tuner
 import numpy as np
 import scipy._lib._util
+from skbase.utils.dependencies import _check_soft_dependencies
+import torch
 from torch.utils.data import DataLoader

 from pytorch_forecasting import TemporalFusionTransformer
@@ -23,6 +27,26 @@
 optuna_logger = logging.getLogger("optuna")


+# todo: Remove this class once lightning allows the pass of weights_only to tuner
+class _NewTuner(Tuner):
+    def lr_find(self, *args, **kwargs):
+        strategy = self._trainer.strategy
+        original_load_checkpoint = strategy.load_checkpoint
+
+        @functools.wraps(original_load_checkpoint)
+        def new_load_checkpoint(*ckpt_args, **ckpt_kwargs):
+            ckpt_kwargs["weights_only"] = False
+            return original_load_checkpoint(*ckpt_args, **ckpt_kwargs)
+
+        if not _check_soft_dependencies("lightning<2.6", severity="none"):
+            strategy.load_checkpoint = new_load_checkpoint
+
+        try:
+            return super().lr_find(*args, **kwargs)
+        finally:
+            strategy.load_checkpoint = original_load_checkpoint
+
+
 # ToDo: remove this once statsmodels release a version compatible with latest
 # scipy version
 def _lazywhere(cond, arrays, f, fillvalue=np.nan, f2=None):
@@ -209,7 +233,7 @@ def objective(trial: optuna.Trial) -> float:
             enable_progress_bar=False,
             enable_model_summary=False,
         )
-        tuner = Tuner(lr_trainer)
+        tuner = _NewTuner(lr_trainer)
         res = tuner.lr_find(
             model,
             train_dataloaders=train_dataloaders,
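Why `lr_find` needs the `_NewTuner` wrapper at all: the tuner snapshots the model to a temporary checkpoint before sweeping learning rates and restores it afterwards through the strategy's `load_checkpoint`, which on lightning >= 2.6 inherits the `weights_only=True` default and rejects the snapshot. The shim forces `weights_only=False` via a `functools.wraps`-preserving replacement, runs the parent `lr_find`, and puts the original method back in the `finally` block. A hedged usage sketch, with `net`, `train_dataloader`, and `val_dataloader` built as in the notebook diffs above:

```python
import lightning.pytorch as pl

from pytorch_forecasting.models.temporal_fusion_transformer.tuning import (
    _NewTuner,
)

trainer = pl.Trainer(accelerator="auto", gradient_clip_val=0.1)
# net and the dataloaders are assumed to exist, created exactly as in the
# patched NBeats/DeepAR/NHiTS/TFT notebooks
res = _NewTuner(trainer).lr_find(
    net,
    train_dataloaders=train_dataloader,
    val_dataloaders=val_dataloader,
    min_lr=1e-5,
)
print(f"suggested learning rate: {res.suggestion()}")
```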
2 changes: 1 addition & 1 deletion pytorch_forecasting/tests/test_all_estimators.py
@@ -318,7 +318,7 @@ def _integration(
     assert len(test_outputs) > 0
     # check loading
     net = estimator_cls.load_from_checkpoint(
-        trainer.checkpoint_callback.best_model_path
+        trainer.checkpoint_callback.best_model_path, weights_only=False
     )

     # check prediction
4 changes: 3 additions & 1 deletion tests/test_models/test_deepar.py
@@ -90,7 +90,9 @@ def _integration(
     test_outputs = trainer.test(net, dataloaders=test_dataloader)
     assert len(test_outputs) > 0
     # check loading
-    net = DeepAR.load_from_checkpoint(trainer.checkpoint_callback.best_model_path)
+    net = DeepAR.load_from_checkpoint(
+        trainer.checkpoint_callback.best_model_path, weights_only=False
+    )

     # check prediction
     net.predict(
2 changes: 1 addition & 1 deletion tests/test_models/test_mlp.py
@@ -71,7 +71,7 @@ def _integration(
     )
     # check loading
     net = DecoderMLP.load_from_checkpoint(
-        trainer.checkpoint_callback.best_model_path
+        trainer.checkpoint_callback.best_model_path, weights_only=False
     )

     # check prediction
4 changes: 3 additions & 1 deletion tests/test_models/test_nbeats.py
@@ -50,7 +50,9 @@ def test_integration(dataloaders_fixed_window_without_covariates, tmp_path):
     test_outputs = trainer.test(net, dataloaders=test_dataloader)
     assert len(test_outputs) > 0
     # check loading
-    net = NBeats.load_from_checkpoint(trainer.checkpoint_callback.best_model_path)
+    net = NBeats.load_from_checkpoint(
+        trainer.checkpoint_callback.best_model_path, weights_only=False
+    )

     # check prediction
     net.predict(
4 changes: 3 additions & 1 deletion tests/test_models/test_nhits.py
@@ -66,7 +66,9 @@ def _integration(dataloader, tmp_path, trainer_kwargs=None, **kwargs):
     test_outputs = trainer.test(net, dataloaders=test_dataloader)
     assert len(test_outputs) > 0
     # check loading
-    net = NHiTS.load_from_checkpoint(trainer.checkpoint_callback.best_model_path)
+    net = NHiTS.load_from_checkpoint(
+        trainer.checkpoint_callback.best_model_path, weights_only=False
+    )

     # check prediction
     net.predict(
2 changes: 1 addition & 1 deletion tests/test_models/test_rnn_model.py
@@ -76,7 +76,7 @@ def _integration(
     assert len(test_outputs) > 0
     # check loading
     net = RecurrentNetwork.load_from_checkpoint(
-        trainer.checkpoint_callback.best_model_path
+        trainer.checkpoint_callback.best_model_path, weights_only=False
     )

     # check prediction