Skip to content

Commit

Permalink
fix: bugs in uts for property fit (#4120)
Browse files Browse the repository at this point in the history
This bug is the same as the one fixed in PR #3837.
Fix the following trivial bugs in the property-fitting unit tests:

- The `box` argument was not passed to `extend_input_and_build_neighbor_list`,
so all tests effectively ran in nopbc mode; when a shifted coordinate falls
outside the box (as sometimes happens) and is normalized explicitly, the
results differ. The fitting input also used `extended_atype` instead of
`atype`, which coincide only in nopbc mode.
- The use of `mixed_types` was inconsistent, mismatched with the descriptor
or sometimes with the neighbor list. Now only `mixed_types == False` is used,
since the descriptor output is not in mixed types.
- Remove the tests for the unused parameters `fit_diag` and `scale` in
property fitting. Add tests for the parameters `intensive` and `bias_method`
in property fitting.

<!-- This is an auto-generated comment: release notes by coderabbit.ai
-->
## Summary by CodeRabbit


- **New Features**
- Introduced new parameters `intensive` and `bias_method` for enhanced
flexibility in property fitting tests.
- Added a new test class `TestInvarianceOutCell` with a method
`test_trans` to evaluate invariance under transformations.
- Updated existing tests to improve clarity and maintainability by
removing the `scale` variable.

- **Bug Fixes**
- Refactored test methods to ensure correct parameter usage, enhancing
the reliability of test outcomes.

<!-- end of auto-generated comment: release notes by coderabbit.ai -->

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
  • Loading branch information
Chengqian-Zhang and pre-commit-ci[bot] committed Sep 12, 2024
1 parent 8b8d12d commit 96ed5df
Showing 1 changed file with 171 additions and 85 deletions.
256 changes: 171 additions & 85 deletions source/tests/pt/model/test_property_fitting.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,6 @@ def setUp(self):
self.rng = np.random.default_rng()
self.nf, self.nloc, _ = self.nlist.shape
self.dd0 = DescrptSeA(self.rcut, self.rcut_smth, self.sel).to(env.DEVICE)
self.scale = self.rng.uniform(0, 1, self.nt).tolist()

def test_consistency(
self,
Expand All @@ -59,27 +58,26 @@ def test_consistency(
self.atype_ext[:, : self.nloc], dtype=int, device=env.DEVICE
)

for mixed_types, nfp, nap, fit_diag, scale, bias_atom_p in itertools.product(
[True, False],
for nfp, nap, bias_atom_p, intensive, bias_method in itertools.product(
[0, 3],
[0, 4],
[True, False],
[None, self.scale],
[
np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]),
np.array([[11, 12, 13, 4, 15], [16, 17, 18, 9, 20]]),
],
[True, False],
["normal", "no_bias"],
):
ft0 = PropertyFittingNet(
self.nt,
self.dd0.dim_out,
task_dim=5,
numb_fparam=nfp,
numb_aparam=nap,
mixed_types=mixed_types,
fit_diag=fit_diag,
scale=scale,
mixed_types=self.dd0.mixed_types(),
bias_atom_p=bias_atom_p,
intensive=intensive,
bias_method=bias_method,
).to(env.DEVICE)

ft1 = DPProperFittingNet.deserialize(ft0.serialize())
Expand Down Expand Up @@ -133,25 +131,104 @@ def test_consistency(
def test_jit(
self,
):
for mixed_types, nfp, nap, fit_diag in itertools.product(
[True, False],
for nfp, nap, intensive, bias_method in itertools.product(
[0, 3],
[0, 4],
[True, False],
["normal", "no_bias"],
):
ft0 = PropertyFittingNet(
self.nt,
self.dd0.dim_out,
task_dim=5,
numb_fparam=nfp,
numb_aparam=nap,
mixed_types=mixed_types,
fit_diag=fit_diag,
mixed_types=self.dd0.mixed_types(),
intensive=intensive,
bias_method=bias_method,
).to(env.DEVICE)
torch.jit.script(ft0)


class TestInvariance(unittest.TestCase):
class TestInvarianceOutCell(unittest.TestCase):
def setUp(self) -> None:
self.natoms = 5
self.rcut = 4
self.rcut_smth = 0.5
self.sel = [46, 92, 4]
self.nf = 1
self.nt = 3
self.rng = np.random.default_rng()
self.coord = torch.tensor(
[
[1.1042, 0.6852, 1.3582],
[1.8812, 1.6277, 0.3153],
[1.5655, 1.0383, 0.4152],
[0.9594, 1.2298, 0.8124],
[0.7905, 0.5014, 0.6654],
],
dtype=dtype,
device=env.DEVICE,
)
self.shift = torch.tensor([1000, 1000, 1000], dtype=dtype, device=env.DEVICE)
self.atype = torch.tensor([0, 0, 0, 1, 1], dtype=torch.int32, device=env.DEVICE)
self.dd0 = DescrptSeA(self.rcut, self.rcut_smth, self.sel).to(env.DEVICE)
self.cell = torch.tensor(
[
[0.7333, 0.9166, 0.6533],
[0.1151, 0.9078, 0.2058],
[0.6907, 0.0370, 0.4863],
],
dtype=dtype,
device=env.DEVICE,
)
self.cell = (self.cell + self.cell.T) + 5.0 * torch.eye(3, device=env.DEVICE)

def test_trans(self):
atype = self.atype.reshape(1, 5)
coord_s = torch.matmul(
torch.remainder(
torch.matmul(self.coord + self.shift, torch.linalg.inv(self.cell)), 1.0
),
self.cell,
)
ft0 = PropertyFittingNet(
self.nt,
self.dd0.dim_out,
task_dim=11,
numb_fparam=0,
numb_aparam=0,
mixed_types=self.dd0.mixed_types(),
).to(env.DEVICE)
res = []
for xyz in [self.coord, coord_s]:
(
extended_coord,
extended_atype,
_,
nlist,
) = extend_input_and_build_neighbor_list(
xyz,
atype,
self.rcut,
self.sel,
self.dd0.mixed_types(),
box=self.cell,
)

rd0, gr0, _, _, _ = self.dd0(
extended_coord,
extended_atype,
nlist,
)

ret0 = ft0(rd0, atype, gr0, fparam=0, aparam=0)
res.append(ret0["property"])

np.testing.assert_allclose(to_numpy_array(res[0]), to_numpy_array(res[1]))


class TestInvarianceRandomShift(unittest.TestCase):
def setUp(self) -> None:
self.natoms = 5
self.rcut = 4
Expand All @@ -166,29 +243,29 @@ def setUp(self) -> None:
self.dd0 = DescrptSeA(self.rcut, self.rcut_smth, self.sel).to(env.DEVICE)
self.cell = torch.rand([3, 3], dtype=dtype, device=env.DEVICE)
self.cell = (self.cell + self.cell.T) + 5.0 * torch.eye(3, device=env.DEVICE)
self.scale = self.rng.uniform(0, 1, self.nt).tolist()

def test_rot(self):
atype = self.atype.reshape(1, 5)
rmat = torch.tensor(special_ortho_group.rvs(3), dtype=dtype, device=env.DEVICE)
coord_rot = torch.matmul(self.coord, rmat)
# use larger cell to rotate only coord and shift to the center of cell
cell_rot = 10.0 * torch.eye(3, dtype=dtype, device=env.DEVICE)

for mixed_types, nfp, nap, fit_diag, scale in itertools.product(
[True, False],
for nfp, nap, intensive, bias_method in itertools.product(
[0, 3],
[0, 4],
[True, False],
[None, self.scale],
["normal", "no_bias"],
):
ft0 = PropertyFittingNet(
self.nt,
self.dd0.dim_out, # dim_descrpt
task_dim=9,
numb_fparam=nfp,
numb_aparam=nap,
mixed_types=True,
fit_diag=fit_diag,
scale=scale,
mixed_types=self.dd0.mixed_types(),
intensive=intensive,
bias_method=bias_method,
).to(env.DEVICE)
if nfp > 0:
ifp = torch.tensor(
Expand All @@ -213,7 +290,12 @@ def test_rot(self):
_,
nlist,
) = extend_input_and_build_neighbor_list(
xyz + self.shift, atype, self.rcut, self.sel, mixed_types
xyz + self.shift,
atype,
self.rcut,
self.sel,
self.dd0.mixed_types(),
box=cell_rot,
)

rd0, gr0, _, _, _ = self.dd0(
Expand All @@ -222,7 +304,7 @@ def test_rot(self):
nlist,
)

ret0 = ft0(rd0, extended_atype, gr0, fparam=ifp, aparam=iap)
ret0 = ft0(rd0, atype, gr0, fparam=ifp, aparam=iap)
res.append(ret0["property"])
np.testing.assert_allclose(
to_numpy_array(res[1]),
Expand All @@ -231,42 +313,44 @@ def test_rot(self):

def test_permu(self):
coord = torch.matmul(self.coord, self.cell)
for fit_diag, scale in itertools.product([True, False], [None, self.scale]):
ft0 = PropertyFittingNet(
self.nt,
self.dd0.dim_out,
task_dim=8,
numb_fparam=0,
numb_aparam=0,
mixed_types=True,
fit_diag=fit_diag,
scale=scale,
).to(env.DEVICE)
res = []
for idx_perm in [[0, 1, 2, 3, 4], [1, 0, 4, 3, 2]]:
atype = self.atype[idx_perm].reshape(1, 5)
(
extended_coord,
extended_atype,
_,
nlist,
) = extend_input_and_build_neighbor_list(
coord[idx_perm], atype, self.rcut, self.sel, False
)
ft0 = PropertyFittingNet(
self.nt,
self.dd0.dim_out,
task_dim=8,
numb_fparam=0,
numb_aparam=0,
mixed_types=self.dd0.mixed_types(),
).to(env.DEVICE)
res = []
for idx_perm in [[0, 1, 2, 3, 4], [1, 0, 4, 3, 2]]:
atype = self.atype[idx_perm].reshape(1, 5)
(
extended_coord,
extended_atype,
_,
nlist,
) = extend_input_and_build_neighbor_list(
coord[idx_perm],
atype,
self.rcut,
self.sel,
self.dd0.mixed_types(),
box=self.cell,
)

rd0, gr0, _, _, _ = self.dd0(
extended_coord,
extended_atype,
nlist,
)
rd0, gr0, _, _, _ = self.dd0(
extended_coord,
extended_atype,
nlist,
)

ret0 = ft0(rd0, extended_atype, gr0, fparam=None, aparam=None)
res.append(ret0["property"])
ret0 = ft0(rd0, atype, gr0, fparam=None, aparam=None)
res.append(ret0["property"])

np.testing.assert_allclose(
to_numpy_array(res[0][:, idx_perm]),
to_numpy_array(res[1]),
)
np.testing.assert_allclose(
to_numpy_array(res[0][:, idx_perm]),
to_numpy_array(res[1]),
)

def test_trans(self):
atype = self.atype.reshape(1, 5)
Expand All @@ -276,38 +360,40 @@ def test_trans(self):
),
self.cell,
)
for fit_diag, scale in itertools.product([True, False], [None, self.scale]):
ft0 = PropertyFittingNet(
self.nt,
self.dd0.dim_out,
task_dim=11,
numb_fparam=0,
numb_aparam=0,
mixed_types=True,
fit_diag=fit_diag,
scale=scale,
).to(env.DEVICE)
res = []
for xyz in [self.coord, coord_s]:
(
extended_coord,
extended_atype,
_,
nlist,
) = extend_input_and_build_neighbor_list(
xyz, atype, self.rcut, self.sel, False
)
ft0 = PropertyFittingNet(
self.nt,
self.dd0.dim_out,
task_dim=11,
numb_fparam=0,
numb_aparam=0,
mixed_types=self.dd0.mixed_types(),
).to(env.DEVICE)
res = []
for xyz in [self.coord, coord_s]:
(
extended_coord,
extended_atype,
_,
nlist,
) = extend_input_and_build_neighbor_list(
xyz,
atype,
self.rcut,
self.sel,
self.dd0.mixed_types(),
box=self.cell,
)

rd0, gr0, _, _, _ = self.dd0(
extended_coord,
extended_atype,
nlist,
)
rd0, gr0, _, _, _ = self.dd0(
extended_coord,
extended_atype,
nlist,
)

ret0 = ft0(rd0, extended_atype, gr0, fparam=0, aparam=0)
res.append(ret0["property"])
ret0 = ft0(rd0, atype, gr0, fparam=0, aparam=0)
res.append(ret0["property"])

np.testing.assert_allclose(to_numpy_array(res[0]), to_numpy_array(res[1]))
np.testing.assert_allclose(to_numpy_array(res[0]), to_numpy_array(res[1]))


class TestPropertyModel(unittest.TestCase):
Expand All @@ -329,7 +415,7 @@ def setUp(self):
task_dim=3,
numb_fparam=0,
numb_aparam=0,
mixed_types=True,
mixed_types=self.dd0.mixed_types(),
intensive=True,
).to(env.DEVICE)
self.type_mapping = ["O", "H", "B"]
Expand Down

0 comments on commit 96ed5df

Please sign in to comment.