From b108bee393453d58890aa09d7cae79340d91cb2c Mon Sep 17 00:00:00 2001
From: Oskar Triebe
Date: Wed, 18 Oct 2023 17:21:24 -0700
Subject: [PATCH 1/3] [Devops] Relax dependency ranges to next major number (#1459)

* relax dependencies

* mend

* mend

* update lock
---
 poetry.lock    |  9 +++++----
 pyproject.toml | 16 ++++++++--------
 2 files changed, 13 insertions(+), 12 deletions(-)

diff --git a/poetry.lock b/poetry.lock
index fee1ecb84..c97e9ecfd 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1306,13 +1306,13 @@ protobuf = ["grpcio-tools (>=1.59.0)"]
 
 [[package]]
 name = "holidays"
-version = "0.33"
+version = "0.35"
 description = "Generate and work with holidays in Python"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "holidays-0.33-py3-none-any.whl", hash = "sha256:49e87b6d39227ddc8569bb2a27d333ffcd1bc2f182a7561cd48b9f7a393b2212"},
-    {file = "holidays-0.33.tar.gz", hash = "sha256:7fb33f5f1776f0fc95d98733aa9c3fece7dcdf5124574d0a003b61a1b6f93515"},
+    {file = "holidays-0.35-py3-none-any.whl", hash = "sha256:3e179ff248f86dee813160c92e2c4babe3bb3c732287499430d1e3ee90c15f16"},
+    {file = "holidays-0.35.tar.gz", hash = "sha256:e62b797ca3ba2366093458cda0cde33d65e7168be8104815b4149afd4ea4a9ed"},
 ]
 
 [package.dependencies]
@@ -4314,4 +4314,5 @@ live = ["livelossplot"]
 [metadata]
 lock-version = "2.0"
 python-versions = ">=3.8,<3.11"
-content-hash = "e9101b1a0eb4ed0cc7cd2d81f3dc43740bf74c6faab62a55e8d6e34734084a4c"
+content-hash = "94a84c35cfa5843698c73ef7023cdfb2a79643b7dbc4ba8833c8dc61f0e9176b"
+
diff --git a/pyproject.toml b/pyproject.toml
index 5fa023d22..aba346aa3 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -19,20 +19,20 @@ Homepage = "https://github.com/ourownstory/neural_prophet"
 
 [tool.poetry.dependencies]
 python = ">=3.8,<3.11"
-captum = "^0.6.0"
-holidays = "^0.33"
+captum = ">=0.6.0,<1.0"
+holidays = ">=0.33,<1.0"
 matplotlib = "^3.5.3"
 numpy = "^1.22.0"
 pandas = "^1.3.5"
 plotly = "^5.13.1"
-plotly-resampler = "^0.8.3.1"
-pytorch-lightning = "^1.9.4"
+plotly-resampler = ">=0.8.3.1,<1.0"
+pytorch-lightning = "^1.9.4" # TODO: move to ^2.0
 tensorboard = "^2.11.2"
-torch = "2.0.0"
-torchmetrics = "^0.11.3"
+torch = "2.0.0" # TODO: relax to ^2.0
+torchmetrics = ">=0.11.3,<1.0"
 typing-extensions = "^4.5.0"
 nbformat = ">=4.2.0"
-livelossplot = { version = "^0.5.5", optional = true }
+livelossplot = { version = ">=0.5.5,<1.0", optional = true }
 
 [tool.poetry.extras]
 live = ["livelossplot"]
@@ -61,7 +61,7 @@ furo = "^2022.9.29"
 optional = true
 
 [tool.poetry.group.pyright.dependencies]
-pandas-stubs = "^2"
+pandas-stubs = "^2.0"
 
 [build-system]
 requires = ["poetry-core"]
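The relaxation above matters because Poetry expands a caret constraint on a 0.x version only up to the next minor release: `^0.33` means `>=0.33,<0.34` and can therefore never resolve `holidays` 0.35, the version the updated lock file now pins. A minimal sketch of that behaviour, using the `packaging` library purely for illustration (not part of the patch):

```python
# Sketch only, not part of the patch: compare the old caret constraint with the
# relaxed range used in pyproject.toml above.
from packaging.specifiers import SpecifierSet
from packaging.version import Version

caret_0_33 = SpecifierSet(">=0.33,<0.34")  # what Poetry means by "^0.33" on a 0.x version
relaxed = SpecifierSet(">=0.33,<1.0")      # the new explicit range

candidate = Version("0.35")                # the holidays version in the updated lock file
print(candidate in caret_0_33)  # False -> blocked by the old constraint
print(candidate in relaxed)     # True  -> allowed by the relaxed constraint
```
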
From bf5794dfb9eab24da574ac580049ff056a188478 Mon Sep 17 00:00:00 2001
From: Oskar Triebe
Date: Wed, 18 Oct 2023 17:35:12 -0700
Subject: [PATCH 2/3] fix-tutorial-data-descript daily not hourly (#1453)

---
 docs/source/tutorials/tutorial01.ipynb | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/source/tutorials/tutorial01.ipynb b/docs/source/tutorials/tutorial01.ipynb
index 2d480ed23..b31eb95fc 100644
--- a/docs/source/tutorials/tutorial01.ipynb
+++ b/docs/source/tutorials/tutorial01.ipynb
@@ -23,7 +23,7 @@
     "source": [
      "## Loading the dataset\n",
      "\n",
-     "For our tutorials we work with energy price data over the 4 years from Spain. The dataset was published on [Kaggle](https://www.kaggle.com/datasets/nicholasjhana/energy-consumption-generation-prices-and-weather) and contains a lot of information to which we will come back later. For now we use a prepared version of the dataset with the hourly energy price data only."
+     "For our tutorials we work with energy price data over the 4 years from Spain. The dataset was published on [Kaggle](https://www.kaggle.com/datasets/nicholasjhana/energy-consumption-generation-prices-and-weather) and contains a lot of information to which we will come back later. For now we use a prepared version of the dataset with the daily energy price data only."
     ]
    },
    {

From f08dcf814034527577abe21ad0bdf28e27d4eb66 Mon Sep 17 00:00:00 2001
From: Oskar Triebe
Date: Thu, 19 Oct 2023 16:18:04 -0700
Subject: [PATCH 3/3] [Minor] SmoothL1Loss correctly mentioned instead of Huber (#1458)

* replace false mentions of Huber loss

* set SmoothL1Loss beta to 0.1

* increase beta to 0.3

* reset beta to 1.0
---
 .../feature-guides/hyperparameter-selection.md              | 2 +-
 ...205\345\217\202\346\225\260\351\200\211\345\217\226.md" | 4 ++--
 neuralprophet/configure.py                                  | 7 ++++---
 neuralprophet/forecaster.py                                 | 4 ++--
 tests/test_configure.py                                     | 2 +-
 tests/test_uncertainty.py                                   | 2 +-
 6 files changed, 11 insertions(+), 10 deletions(-)

diff --git a/docs/source/how-to-guides/feature-guides/hyperparameter-selection.md b/docs/source/how-to-guides/feature-guides/hyperparameter-selection.md
index 785fb4338..0e532d5f5 100644
--- a/docs/source/how-to-guides/feature-guides/hyperparameter-selection.md
+++ b/docs/source/how-to-guides/feature-guides/hyperparameter-selection.md
@@ -27,7 +27,7 @@ If it looks like the model is overfitting to the training data (the live loss pl
 you can reduce `epochs` and `learning_rate`, and potentially increase the `batch_size`.
 If it is underfitting, the number of `epochs` and `learning_rate` can be increased and the `batch_size` potentially decreased.
 
-The default loss function is the 'Huber' loss, which is considered to be robust to outliers.
+The default loss function is the 'SmoothL1Loss' loss, which is considered to be robust to outliers.
 However, you are free to choose the standard `MSE` or any other PyTorch `torch.nn.modules.loss` loss function.
 
 ## Increasing Depth of the Model
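For readers of the updated guide, a hedged sketch of how the loss is chosen in practice (not part of the patch): the `loss_func` argument of `NeuralProphet`, whose signature appears in `forecaster.py` later in this patch, accepts the string names used in the docs as well as a torch loss instance or callable.

```python
# Sketch only, not part of the patch: the string names below match the options
# listed in the NeuralProphet docstring later in this patch.
from neuralprophet import NeuralProphet

m_default = NeuralProphet()             # uses the default SmoothL1Loss
m_mse = NeuralProphet(loss_func="MSE")  # standard mean squared error
m_mae = NeuralProphet(loss_func="MAE")  # mean absolute error
```
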
diff --git "a/docs/zh/\350\266\205\345\217\202\346\225\260\351\200\211\345\217\226.md" "b/docs/zh/\350\266\205\345\217\202\346\225\260\351\200\211\345\217\226.md"
index af553e5f0..f101d7e50 100644
--- "a/docs/zh/\350\266\205\345\217\202\346\225\260\351\200\211\345\217\226.md"
+++ "b/docs/zh/\350\266\205\345\217\202\346\225\260\351\200\211\345\217\226.md"
@@ -22,7 +22,7 @@ NeuralProphet有一些超参数需要用户指定。如果没有指定，将使
 | `learning_rate` | None |
 | `epochs` | None |
 | `batch_size` | None |
-| `loss_func` | Huber |
+| `loss_func` | SmoothL1Loss |
 | `train_speed` | None |
 | `normalize_y` | auto |
 | `impute_missing` | True |
@@ -43,7 +43,7 @@ NeuralProphet采用随机梯度下降法进行拟合--更准确地说，是采
 
 如果看起来模型对训练数据过度拟合（实时损失图在此很有用），可以减少 `epochs` 和 `learning_rate`，并有可能增加 `batch_size`。如果是低拟合，可以增加`epochs` 和`learning_rate` 的数量，并有可能减少`batch_size` 。
 
-默认的损失函数是 "Huber "损失，该函数被认为对离群值具有鲁棒性。但是，您可以自由选择标准的 "MSE "或任何其他PyTorch `torch.nn.modules.loss`损失函数。
+默认的损失函数是 "SmoothL1Loss "损失，该函数被认为对离群值具有鲁棒性。但是，您可以自由选择标准的 "MSE "或任何其他PyTorch `torch.nn.modules.loss`损失函数。
 
 ## 增加模型的深度
diff --git a/neuralprophet/configure.py b/neuralprophet/configure.py
index 9b658854e..fe4686811 100644
--- a/neuralprophet/configure.py
+++ b/neuralprophet/configure.py
@@ -117,9 +117,10 @@ def __post_init__(self):
 
     def set_loss_func(self):
         if isinstance(self.loss_func, str):
-            if self.loss_func.lower() in ["huber", "smoothl1", "smoothl1loss"]:
-                self.loss_func = torch.nn.SmoothL1Loss(reduction="none")
-            elif self.loss_func.lower() in ["mae", "l1", "l1loss"]:
+            if self.loss_func.lower() in ["smoothl1", "smoothl1loss", "huber"]:
+                # keeping 'huber' for backwards compatiblility, though not identical
+                self.loss_func = torch.nn.SmoothL1Loss(reduction="none", beta=1.0)
+            elif self.loss_func.lower() in ["mae", "maeloss", "l1", "l1loss"]:
                 self.loss_func = torch.nn.L1Loss(reduction="none")
             elif self.loss_func.lower() in ["mse", "mseloss", "l2", "l2loss"]:
                 self.loss_func = torch.nn.MSELoss(reduction="none")
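The new comment in `configure.py` keeps `"huber"` as an accepted alias while noting the two losses are not identical. In PyTorch, `HuberLoss(delta)` equals `delta * SmoothL1Loss(beta=delta)`, so the two coincide only at `beta = delta = 1.0`, the value this commit settles on after trying 0.1 and 0.3. A small check illustrating this (not part of the patch):

```python
# Sketch only, not part of the patch: SmoothL1Loss and HuberLoss coincide
# at beta = delta = 1.0 and diverge for other values.
import torch

pred = torch.tensor([0.0, 0.5, 2.0, 4.0])
target = torch.zeros(4)

print(torch.allclose(
    torch.nn.SmoothL1Loss(beta=1.0)(pred, target),
    torch.nn.HuberLoss(delta=1.0)(pred, target),
))  # True: identical at 1.0

print(torch.allclose(
    torch.nn.SmoothL1Loss(beta=0.3)(pred, target),
    torch.nn.HuberLoss(delta=0.3)(pred, target),
))  # False: Huber additionally scales by delta
```
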
diff --git a/neuralprophet/forecaster.py b/neuralprophet/forecaster.py
index 383aa18c1..852fc297b 100644
--- a/neuralprophet/forecaster.py
+++ b/neuralprophet/forecaster.py
@@ -223,7 +223,7 @@ class NeuralProphet:
             Type of loss to use:
 
             Options
-                * (default) ``Huber``: Huber loss function
+                * (default) ``SmoothL1Loss``: SmoothL1 loss function
                 * ``MSE``: Mean Squared Error loss function
                 * ``MAE``: Mean Absolute Error loss function
                 * ``torch.nn.functional.loss.``: loss or callable for custom loss, eg. L1-Loss
@@ -360,7 +360,7 @@ def __init__(
         learning_rate: Optional[float] = None,
         epochs: Optional[int] = None,
         batch_size: Optional[int] = None,
-        loss_func: Union[str, torch.nn.modules.loss._Loss, Callable] = "Huber",
+        loss_func: Union[str, torch.nn.modules.loss._Loss, Callable] = "SmoothL1Loss",
         optimizer: Union[str, Type[torch.optim.Optimizer]] = "AdamW",
         newer_samples_weight: float = 2,
         newer_samples_start: float = 0.0,
diff --git a/tests/test_configure.py b/tests/test_configure.py
index 8bcae981d..e5c5e9800 100644
--- a/tests/test_configure.py
+++ b/tests/test_configure.py
@@ -9,7 +9,7 @@ def generate_config_train_params(overrides={}):
         "learning_rate": None,
         "epochs": None,
         "batch_size": None,
-        "loss_func": "Huber",
+        "loss_func": "SmoothL1Loss",
         "optimizer": "AdamW",
     }
     for key, value in overrides.items():
diff --git a/tests/test_uncertainty.py b/tests/test_uncertainty.py
index e7ba16118..039128cb1 100644
--- a/tests/test_uncertainty.py
+++ b/tests/test_uncertainty.py
@@ -60,7 +60,7 @@ def test_uncertainty_estimation_peyton_manning():
 
     m = NeuralProphet(
         n_forecasts=1,
-        loss_func="Huber",
+        loss_func="SmoothL1Loss",
         quantiles=[0.01, 0.99],
         epochs=EPOCHS,
         batch_size=BATCH_SIZE,
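A usage sketch mirroring the updated `test_uncertainty.py` setup (not part of the patch; the synthetic data frame, epoch and batch values are made up rather than the constants used in CI):

```python
# Sketch only, not part of the patch: the renamed default loss used together
# with quantile regression, as in the updated uncertainty test.
import numpy as np
import pandas as pd
from neuralprophet import NeuralProphet

df = pd.DataFrame({
    "ds": pd.date_range("2023-01-01", periods=200, freq="D"),
    "y": np.random.default_rng(0).normal(10.0, 1.0, 200),
})

m = NeuralProphet(
    n_forecasts=1,
    loss_func="SmoothL1Loss",  # "Huber" is still accepted as an alias
    quantiles=[0.01, 0.99],
    epochs=5,
    batch_size=64,
)
metrics = m.fit(df, freq="D")
forecast = m.predict(df)  # yhat1 plus the 1% and 99% quantile columns
```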