Skip to content

Commit

Permalink
Add doc and output_def UT
Browse files Browse the repository at this point in the history
  • Loading branch information
Chengqian-Zhang committed Aug 28, 2024
1 parent b3031d0 commit d5e03e9
Show file tree
Hide file tree
Showing 3 changed files with 125 additions and 1 deletion.
44 changes: 44 additions & 0 deletions deepmd/dpmodel/fitting/property_fitting.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,50 @@

@InvarFitting.register("property")
class PropertyFittingNet(InvarFitting):
r"""Fitting the rotationally invariant properties of `task_dim` of the system.
Parameters
----------
ntypes
The number of atom types.
dim_descrpt
The dimension of the input descriptor.
task_dim
The dimension of outputs of fitting net.
neuron
Number of neurons :math:`N` in each hidden layer of the fitting net
bias_atom_p
Average property per atom for each element.
rcond
The condition number for the regression of atomic energy.
trainable
If the weights of fitting net are trainable.
Suppose that we have :math:`N_l` hidden layers in the fitting net,
this list is of length :math:`N_l + 1`, specifying if the hidden layers and the output layer are trainable.
intensive
Whether the fitting property is intensive.
bias_method
The method of applying the bias to each atomic output, user can select 'normal' or 'no_bias'.
If 'normal' is used, the computed bias will be added to the atomic output.
If 'no_bias' is used, no bias will be added to the atomic output.
resnet_dt
Time-step `dt` in the resnet construction:
:math:`y = x + dt * \phi (Wx + b)`
numb_fparam
Number of frame parameter
numb_aparam
Number of atomic parameter
activation_function
The activation function :math:`\boldsymbol{\phi}` in the embedding net. Supported options are |ACTIVATION_FN|
precision
The precision of the embedding net parameters. Supported options are |PRECISION|
mixed_types
If false, different atomic types use different fitting nets, otherwise different atom types share the same fitting net.
exclude_types: List[int]
Atomic contributions of the excluded atom types are set zero.
type_map: List[str], Optional
A list of strings. Give the name to each type of atoms.
"""
def __init__(
self,
ntypes: int,
Expand Down
36 changes: 36 additions & 0 deletions deepmd/pt/model/task/property.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,42 @@

@Fitting.register("property")
class PropertyFittingNet(InvarFitting):
"""Fitting the rotationally invariant properties of `task_dim` of the system.
Parameters
----------
ntypes : int
Element count.
dim_descrpt : int
Embedding width per atom.
task_dim : int
The dimension of outputs of fitting net.
neuron : List[int]
Number of neurons in each hidden layers of the fitting net.
bias_atom_p : torch.Tensor, optional
Average property per atom for each element.
intensive : bool, optional
Whether the fitting property is intensive.
bias_method : str, optional
The method of applying the bias to each atomic output, user can select 'normal' or 'no_bias'.
If 'normal' is used, the computed bias will be added to the atomic output.
If 'no_bias' is used, no bias will be added to the atomic output.
resnet_dt : bool
Using time-step in the ResNet construction.
numb_fparam : int
Number of frame parameters.
numb_aparam : int
Number of atomic parameters.
activation_function : str
Activation function.
precision : str
Numerical precision.
mixed_types : bool
If true, use a uniform fitting net for all atom types, otherwise use
different fitting nets for different atom types.
seed : int, optional
Random seed.
"""
def __init__(
self,
ntypes: int,
Expand Down
46 changes: 45 additions & 1 deletion source/tests/common/dpmodel/test_output_def.py
Original file line number Diff line number Diff line change
Expand Up @@ -80,10 +80,19 @@ def test_model_output_def(self):
c_differentiable=False,
atomic=True,
),
OutputVariableDef(
"gap",
[13],
reducible=True,
r_differentiable=False,
c_differentiable=False,
atomic=True,
intensive=True,
)
]
# fitting definition
fd = FittingOutputDef(defs)
expected_keys = ["energy", "energy2", "energy3", "dos", "foo"]
expected_keys = ["energy", "energy2", "energy3", "dos", "foo", "gap"]
self.assertEqual(
set(expected_keys),
set(fd.keys()),
Expand All @@ -94,18 +103,21 @@ def test_model_output_def(self):
self.assertEqual(fd["energy3"].shape, [1])
self.assertEqual(fd["dos"].shape, [10])
self.assertEqual(fd["foo"].shape, [3])
self.assertEqual(fd["gap"].shape, [13])
# atomic
self.assertEqual(fd["energy"].atomic, True)
self.assertEqual(fd["energy2"].atomic, True)
self.assertEqual(fd["energy3"].atomic, True)
self.assertEqual(fd["dos"].atomic, True)
self.assertEqual(fd["foo"].atomic, True)
self.assertEqual(fd["gap"].atomic, True)
# reduce
self.assertEqual(fd["energy"].reducible, True)
self.assertEqual(fd["energy2"].reducible, True)
self.assertEqual(fd["energy3"].reducible, True)
self.assertEqual(fd["dos"].reducible, True)
self.assertEqual(fd["foo"].reducible, False)
self.assertEqual(fd["gap"].reducible, True)
# derivative
self.assertEqual(fd["energy"].r_differentiable, True)
self.assertEqual(fd["energy"].c_differentiable, True)
Expand All @@ -118,14 +130,17 @@ def test_model_output_def(self):
self.assertEqual(fd["energy3"].r_hessian, False)
self.assertEqual(fd["dos"].r_differentiable, False)
self.assertEqual(fd["foo"].r_differentiable, False)
self.assertEqual(fd["gap"].r_differentiable, False)
self.assertEqual(fd["dos"].c_differentiable, False)
self.assertEqual(fd["foo"].c_differentiable, False)
self.assertEqual(fd["gap"].c_differentiable, False)
# magnetic
self.assertEqual(fd["energy"].magnetic, False)
self.assertEqual(fd["energy2"].magnetic, False)
self.assertEqual(fd["energy3"].magnetic, True)
self.assertEqual(fd["dos"].magnetic, False)
self.assertEqual(fd["foo"].magnetic, False)
self.assertEqual(fd["gap"].magnetic, False)
# model definition
md = ModelOutputDef(fd)
expected_keys = [
Expand All @@ -152,6 +167,8 @@ def test_model_output_def(self):
"dos_redu",
"mask",
"mask_mag",
"gap",
"gap_redu"
]
self.assertEqual(
set(expected_keys),
Expand All @@ -165,6 +182,7 @@ def test_model_output_def(self):
self.assertEqual(md["energy3"].reducible, True)
self.assertEqual(md["dos"].reducible, True)
self.assertEqual(md["foo"].reducible, False)
self.assertEqual(md["gap"].reducible, True)
# derivative
self.assertEqual(md["energy"].r_differentiable, True)
self.assertEqual(md["energy"].c_differentiable, True)
Expand All @@ -177,8 +195,10 @@ def test_model_output_def(self):
self.assertEqual(md["energy3"].r_hessian, False)
self.assertEqual(md["dos"].r_differentiable, False)
self.assertEqual(md["foo"].r_differentiable, False)
self.assertEqual(md["gap"].r_differentiable, False)
self.assertEqual(md["dos"].c_differentiable, False)
self.assertEqual(md["foo"].c_differentiable, False)
self.assertEqual(md["gap"].c_differentiable, False)
# shape
self.assertEqual(md["mask"].shape, [1])
self.assertEqual(md["mask_mag"].shape, [1])
Expand All @@ -201,6 +221,8 @@ def test_model_output_def(self):
self.assertEqual(md["energy3_derv_c_redu"].shape, [1, 9])
self.assertEqual(md["energy3_derv_r_mag"].shape, [1, 3])
self.assertEqual(md["energy3_derv_c_mag"].shape, [1, 9])
self.assertEqual(md["gap"].shape, [13])
self.assertEqual(md["gap_redu"].shape, [13])
# atomic
self.assertEqual(md["energy"].atomic, True)
self.assertEqual(md["energy2"].atomic, True)
Expand All @@ -221,6 +243,8 @@ def test_model_output_def(self):
self.assertEqual(md["energy3_derv_r_mag"].atomic, True)
self.assertEqual(md["energy3_derv_c_mag"].atomic, True)
self.assertEqual(md["energy3_derv_c_redu"].atomic, False)
self.assertEqual(md["gap"].atomic, True)
self.assertEqual(md["gap_redu"].atomic, False)
# category
self.assertEqual(md["mask"].category, OutputVariableCategory.OUT)
self.assertEqual(md["mask_mag"].category, OutputVariableCategory.OUT)
Expand Down Expand Up @@ -256,6 +280,8 @@ def test_model_output_def(self):
self.assertEqual(
md["energy3_derv_c_mag"].category, OutputVariableCategory.DERV_C
)
self.assertEqual(md["gap"].category, OutputVariableCategory.OUT)
self.assertEqual(md["gap_redu"].category, OutputVariableCategory.REDU)
# flag
OVO = OutputVariableOperation
self.assertEqual(md["energy"].category & OVO.REDU, 0)
Expand All @@ -273,6 +299,9 @@ def test_model_output_def(self):
self.assertEqual(md["foo"].category & OVO.REDU, 0)
self.assertEqual(md["foo"].category & OVO.DERV_R, 0)
self.assertEqual(md["foo"].category & OVO.DERV_C, 0)
self.assertEqual(md["gap"].category & OVO.REDU, 0)
self.assertEqual(md["gap"].category & OVO.DERV_R, 0)
self.assertEqual(md["gap"].category & OVO.DERV_C, 0)
# flag: energy
self.assertEqual(
md["energy_redu"].category & OVO.REDU,
Expand Down Expand Up @@ -531,6 +560,21 @@ def test_energy_magnetic(self):
magnetic=True,
),
)

def test_inten_requires_redu(self):
    """An intensive output variable must also be reducible.

    Constructing an ``OutputVariableDef`` with ``intensive=True`` but
    ``reducible=False`` is contradictory (an intensive property is
    defined per reduced system), so the constructor must raise
    ``ValueError``.
    """
    # No need to bind the context manager result: we only assert the
    # exception type, not its message. The original also wrapped the
    # constructor call in a throwaway one-element tuple; dropped here.
    with self.assertRaises(ValueError):
        OutputVariableDef(
            "foo",
            [20],
            reducible=False,
            atomic=True,
            r_differentiable=False,
            r_hessian=False,
            magnetic=False,
            intensive=True,
        )

def test_model_decorator(self):
nf = 2
Expand Down

0 comments on commit d5e03e9

Please sign in to comment.