Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

【PPSCI Doc No.75, 77-85】 #703

Merged
merged 3 commits into from
Dec 21, 2023
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
46 changes: 38 additions & 8 deletions ppsci/loss/func.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,23 +27,53 @@
class FunctionalLoss(base.Loss):
    r"""Functional loss class, which allows the use of a custom loss-computing function, given as loss_expr, for complex computation cases.

$$
L = f(x, y)
NKNaN marked this conversation as resolved.
Show resolved Hide resolved
$$

$$
\mathbf{x}, \mathbf{y} \in \mathcal{R}^{N}
$$

when `reduction` is set to "mean"

$$
L = MEAN \left[ f(x, y) \right]
$$

when `reduction` is set to "sum"

$$
L = SUM \left[ f(x, y) \right]
$$

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

这里可以删掉,因为 FunctionalLoss 不需要 reduction这个参数

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

已删除

Args:
loss_expr (Callable): expression of loss calculation.
reduction (Literal["mean", "sum"], optional): Reduction method. Defaults to "mean".
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

可以删除reduction参数

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

已删除

weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None.

Examples:
>>> import ppsci
>>> import paddle
>>> from ppsci.loss import FunctionalLoss
>>> import paddle.nn.functional as F
>>> def loss_expr(output_dict, *args):
>>> def mse_sum_loss(output_dict, label_dict, weight_dict=None):
... losses = 0
... for key in output_dict:
... length = int(len(output_dict[key])/2)
... out_dict = {key: output_dict[key][:length]}
... label_dict = {key: output_dict[key][length:]}
... losses += F.mse_loss(out_dict, label_dict, "sum")
... for key in output_dict.keys():
... loss = F.mse_loss(output_dict[key], label_dict[key], "sum")
... if weight_dict:
... loss *= weight_dict[key]
... losses += loss
... return losses
>>> loss = ppsci.loss.FunctionalLoss(loss_expr)
>>> loss = FunctionalLoss(mse_sum_loss)
>>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]),
... 'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])}
>>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]),
... 'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])}
>>> weight_dict = {'u': 0.8, 'v': 0.2}
>>> result = loss(output_dict, label_dict, weight_dict)
>>> print(result)
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
17.89600182)
"""

def __init__(
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

可以把下方的reduction这个参数删除

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

已删除

Expand Down
22 changes: 20 additions & 2 deletions ppsci/loss/integral.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,8 +44,26 @@ class IntegralLoss(base.Loss):
weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None.
Examples:
>>> import ppsci
>>> loss = ppsci.loss.IntegralLoss("mean")
>>> import paddle
>>> from ppsci.loss import IntegralLoss
>>> output_dict = {'u': paddle.to_tensor([[0.5, 2.2, 0.9], [1.1, 0.8, -1.3]]),
... 'v': paddle.to_tensor([[0.5, 2.2, 0.9], [1.1, 0.8, -1.3]]),
... 'area': paddle.to_tensor([[0.01, 0.02, 0.03], [0.01, 0.02, 0.03]])}
>>> label_dict = {'u': paddle.to_tensor([-1.8, 0.0]),
... 'v': paddle.to_tensor([0.1, 0.1])}
>>> weight = {'u': 0.8, 'v': 0.2}
>>> loss = IntegralLoss(weight=weight)
>>> result = loss(output_dict, label_dict)
>>> print(result)
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
1.40911996)
>>> loss = IntegralLoss(reduction="sum", weight=weight)
>>> result = loss(output_dict, label_dict)
>>> print(result)
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
2.81823993)
"""

def __init__(
Expand Down
33 changes: 31 additions & 2 deletions ppsci/loss/l1.py
Original file line number Diff line number Diff line change
Expand Up @@ -119,13 +119,42 @@ class PeriodicL1Loss(base.Loss):
$\mathbf{x_l} \in \mathcal{R}^{N}$ is the first half of batch output,
$\mathbf{x_r} \in \mathcal{R}^{N}$ is the second half of batch output.
when `reduction` is set to "mean"
$$
L = MEAN \left( \Vert \mathbf{x_l}-\mathbf{x_r} \Vert_1 \right)
$$
when `reduction` is set to "sum"
$$
L = SUM \left( \Vert \mathbf{x_l}-\mathbf{x_r} \Vert_1 \right)
$$
Args:
reduction (Literal["mean", "sum"], optional): Reduction method. Defaults to "mean".
weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None.
Examples:
>>> import ppsci
>>> loss = ppsci.loss.PeriodicL1Loss("mean")
>>> import paddle
>>> from ppsci.loss import PeriodicL1Loss
>>> output_dict = {'u': paddle.to_tensor([[0.5, 2.2, 0.9], [1.1, 0.8, -1.3]]),
... 'v': paddle.to_tensor([[0.5, 2.2, 0.9], [1.1, 0.8, -1.3]])}
>>> label_dict = {'u': paddle.to_tensor([[-1.8, 0.0, 1.0], [-0.2, 0.2, 2.5]]),
... 'v': paddle.to_tensor([[0.1, 0.1, 0.1], [0.1, 0.1, 0.1]])}
>>> weight = {'u': 0.8, 'v': 0.2}
>>> loss = PeriodicL1Loss(weight=weight)
>>> result = loss(output_dict, label_dict)
>>> print(result)
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
4.19999981)
>>> loss = PeriodicL1Loss(reduction="sum", weight=weight)
>>> result = loss(output_dict, label_dict)
>>> print(result)
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
4.19999981)
"""

def __init__(
Expand Down
97 changes: 91 additions & 6 deletions ppsci/loss/l2.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,13 +36,40 @@ class L2Loss(base.Loss):
\mathbf{x}, \mathbf{y} \in \mathcal{R}^{N}
$$
when `reduction` is set to "mean"
$$
L = MEAN \left( \Vert \mathbf{x} - \mathbf{y} \Vert_2 \right)
$$
when `reduction` is set to "sum"
$$
L = SUM \left( \Vert \mathbf{x} - \mathbf{y} \Vert_2 \right)
$$
Args:
reduction (Literal["mean", "sum"], optional): Reduction method. Defaults to "mean".
weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None.
Examples:
>>> import ppsci
>>> loss = ppsci.loss.L2Loss()
>>> import paddle
>>> from ppsci.loss import L2Loss
>>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]),
... 'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])}
>>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]),
... 'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])}
>>> weight = {'u': 0.8, 'v': 0.2}
>>> loss = L2Loss(weight=weight)
>>> result = loss(output_dict, label_dict)
>>> print(result)
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
2.78884506)
>>> loss = L2Loss(reduction="sum", weight=weight)
>>> result = loss(output_dict, label_dict)
>>> print(result)
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
5.57769012)
"""

def __init__(
Expand Down Expand Up @@ -92,13 +119,42 @@ class PeriodicL2Loss(base.Loss):
$\mathbf{x_l} \in \mathcal{R}^{N}$ is the first half of batch output,
$\mathbf{x_r} \in \mathcal{R}^{N}$ is the second half of batch output.
when `reduction` is set to "mean"
$$
L = MEAN \left( \Vert \mathbf{x_l}-\mathbf{x_r} \Vert_2 \right)
$$
when `reduction` is set to "sum"
$$
L = SUM \left( \Vert \mathbf{x_l}-\mathbf{x_r} \Vert_2 \right)
$$
Args:
reduction (Literal["mean", "sum"], optional): Reduction method. Defaults to "mean".
weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None.
Examples:
>>> import ppsci
>>> loss = ppsci.loss.PeriodicL2Loss()
>>> import paddle
>>> from ppsci.loss import PeriodicL2Loss
>>> output_dict = {'u': paddle.to_tensor([[0.5, 2.2, 0.9], [1.1, 0.8, -1.3]]),
... 'v': paddle.to_tensor([[0.5, 2.2, 0.9], [1.1, 0.8, -1.3]])}
>>> label_dict = {'u': paddle.to_tensor([[-1.8, 0.0, 1.0], [-0.2, 0.2, 2.5]]),
... 'v': paddle.to_tensor([[0.1, 0.1, 0.1], [0.1, 0.1, 0.1]])}
>>> weight = {'u': 0.8, 'v': 0.2}
>>> loss = PeriodicL2Loss(weight=weight)
>>> result = loss(output_dict, label_dict)
>>> print(result)
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
2.67581749)
>>> loss = PeriodicL2Loss(reduction="sum", weight=weight)
>>> result = loss(output_dict, label_dict)
>>> print(result)
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
2.67581749)
"""

def __init__(
Expand Down Expand Up @@ -158,13 +214,42 @@ class L2RelLoss(base.Loss):
\mathbf{x}, \mathbf{y} \in \mathcal{R}^{N}
$$
when `reduction` is set to "mean"
$$
L = MEAN \left( \dfrac{\Vert \mathbf{x} - \mathbf{y} \Vert_2}{\Vert \mathbf{y} \Vert_2} \right)
$$
when `reduction` is set to "sum"
$$
L = SUM \left( \dfrac{\Vert \mathbf{x} - \mathbf{y} \Vert_2}{\Vert \mathbf{y} \Vert_2} \right)
$$
Args:
reduction (Literal["mean", "sum"], optional): Specifies the reduction to apply to the output: 'mean' | 'sum'. Defaults to "mean".
weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None.
Examples:
>>> import ppsci
>>> loss = ppsci.loss.L2RelLoss()
>>> import paddle
>>> from ppsci.loss import L2RelLoss
>>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]),
... 'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])}
>>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]),
... 'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])}
>>> weight = {'u': 0.8, 'v': 0.2}
>>> loss = L2RelLoss(weight=weight)
>>> result = loss(output_dict, label_dict)
>>> print(result)
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
2.93676996)
>>> loss = L2RelLoss(reduction="sum", weight=weight)
>>> result = loss(output_dict, label_dict)
>>> print(result)
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
5.87353992)
"""

def __init__(
Expand Down
21 changes: 19 additions & 2 deletions ppsci/loss/mae.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,8 +44,25 @@ class MAELoss(base.Loss):
weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None.
Examples:
>>> import ppsci
>>> loss = ppsci.loss.MAELoss("mean")
>>> import paddle
>>> from ppsci.loss import MAELoss
>>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]),
... 'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])}
>>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]),
... 'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])}
>>> weight = {'u': 0.8, 'v': 0.2}
>>> loss = MAELoss(weight=weight)
>>> result = loss(output_dict, label_dict)
>>> print(result)
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
1.67999995)
>>> loss = MAELoss(reduction="sum", weight=weight)
>>> result = loss(output_dict, label_dict)
>>> print(result)
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
6.71999979)
"""

def __init__(
Expand Down
65 changes: 61 additions & 4 deletions ppsci/loss/mse.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,8 +44,25 @@ class MSELoss(base.Loss):
weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None.
Examples:
>>> import ppsci
>>> loss = ppsci.loss.MSELoss("mean")
>>> import paddle
>>> from ppsci.loss import MSELoss
>>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]),
... 'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])}
>>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]),
... 'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])}
>>> weight = {'u': 0.8, 'v': 0.2}
>>> loss = MSELoss(weight=weight)
>>> result = loss(output_dict, label_dict)
>>> print(result)
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
4.47400045)
>>> loss = MSELoss(reduction="sum", weight=weight)
>>> result = loss(output_dict, label_dict)
>>> print(result)
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
17.89600182)
"""

def __init__(
Expand Down Expand Up @@ -108,8 +125,27 @@ class MSELossWithL2Decay(MSELoss):
ValueError: reduction should be 'mean' or 'sum'.
Examples:
>>> import ppsci
>>> loss = ppsci.loss.MSELossWithL2Decay("mean", {"k_matrix": 2.0})
>>> import paddle
>>> from ppsci.loss import MSELossWithL2Decay
>>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]),
... 'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])}
>>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]),
... 'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])}
>>> weight = {'u': 0.8, 'v': 0.2}
>>> regularization_dict = {'u': 2.0}
>>> loss = MSELossWithL2Decay(regularization_dict=regularization_dict, weight=weight)
>>> result = loss(output_dict, label_dict)
>>> print(result)
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
12.39400005)
>>> regularization_dict = {'v': 1.0}
>>> loss = MSELossWithL2Decay(reduction="sum", regularization_dict=regularization_dict, weight=weight)
>>> result = loss(output_dict, label_dict)
>>> print(result)
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
21.85600090)
"""

def __init__(
Expand Down Expand Up @@ -152,6 +188,27 @@ class PeriodicMSELoss(base.Loss):
Args:
reduction (Literal["mean", "sum"], optional): Reduction method. Defaults to "mean".
weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None.
Examples:
>>> import paddle
>>> from ppsci.loss import PeriodicMSELoss
>>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]),
... 'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])}
>>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]),
... 'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])}
>>> weight = {'u': 0.8, 'v': 0.2}
>>> loss = PeriodicMSELoss(weight=weight)
>>> result = loss(output_dict, label_dict)
>>> print(result)
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
2.59999967)
>>> loss = PeriodicMSELoss(reduction="sum", weight=weight)
>>> result = loss(output_dict, label_dict)
>>> print(result)
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
5.19999933)
"""

def __init__(
Expand Down