From b26bab7b2366764708eefbdf5ef56c01c1805d46 Mon Sep 17 00:00:00 2001
From: Chengqian-Zhang <2000011006@stu.pku.edu.cn>
Date: Thu, 19 Sep 2024 05:20:17 +0000
Subject: [PATCH 1/8] Init branch

---
 deepmd/pt/train/training.py | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/deepmd/pt/train/training.py b/deepmd/pt/train/training.py
index a7b9e25b4..5dfc0ef99 100644
--- a/deepmd/pt/train/training.py
+++ b/deepmd/pt/train/training.py
@@ -489,6 +489,16 @@ def collect_single_finetune_params(
                         _new_state_dict[item_key] = (
                             _random_state_dict[item_key].clone().detach()
                         )
+                    elif _new_fitting and ((".out_bias" in item_key) or (".out_std" in item_key)):
+                        new_key = item_key.replace(
+                            f".{_model_key}.", f".{_model_key_from}."
+                        )
+                        if _random_state_dict[item_key].shape[-1] != _origin_state_dict[new_key].shape[-1]:
+                            assert _random_state_dict[item_key].shape[:-1] == _origin_state_dict[new_key].shape[:-1]
+                            _origin_state_dict[new_key] = _origin_state_dict[new_key].expand(_random_state_dict[item_key].shape)
+                        _new_state_dict[item_key] = (
+                            _origin_state_dict[new_key].clone().detach()
+                        )
                     else:
                         new_key = item_key.replace(
                             f".{_model_key}.", f".{_model_key_from}."

From 1dac68c58129cf3c05fe2856768589d6bd6e6a6e Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Thu, 19 Sep 2024 05:23:57 +0000
Subject: [PATCH 2/8] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 deepmd/pt/train/training.py | 18 ++++++++++++++----
 1 file changed, 14 insertions(+), 4 deletions(-)

diff --git a/deepmd/pt/train/training.py b/deepmd/pt/train/training.py
index 5dfc0ef99..0f8665039 100644
--- a/deepmd/pt/train/training.py
+++ b/deepmd/pt/train/training.py
@@ -489,13 +489,23 @@ def collect_single_finetune_params(
                         _new_state_dict[item_key] = (
                             _random_state_dict[item_key].clone().detach()
                         )
-                    elif _new_fitting and ((".out_bias" in item_key) or (".out_std" in item_key)):
+                    elif _new_fitting and (
+                        (".out_bias" in item_key) or (".out_std" in item_key)
+                    ):
                         new_key = item_key.replace(
                             f".{_model_key}.", f".{_model_key_from}."
                         )
-                        if _random_state_dict[item_key].shape[-1] != _origin_state_dict[new_key].shape[-1]:
-                            assert _random_state_dict[item_key].shape[:-1] == _origin_state_dict[new_key].shape[:-1]
-                            _origin_state_dict[new_key] = _origin_state_dict[new_key].expand(_random_state_dict[item_key].shape)
+                        if (
+                            _random_state_dict[item_key].shape[-1]
+                            != _origin_state_dict[new_key].shape[-1]
+                        ):
+                            assert (
+                                _random_state_dict[item_key].shape[:-1]
+                                == _origin_state_dict[new_key].shape[:-1]
+                            )
+                            _origin_state_dict[new_key] = _origin_state_dict[
+                                new_key
+                            ].expand(_random_state_dict[item_key].shape)
                         _new_state_dict[item_key] = (
                             _origin_state_dict[new_key].clone().detach()
                         )

From 2acfa62270ae70776d8fe0c370347ad3607878e7 Mon Sep 17 00:00:00 2001
From: Chengqian-Zhang <2000011006@stu.pku.edu.cn>
Date: Thu, 19 Sep 2024 05:54:15 +0000
Subject: [PATCH 3/8] change fix

---
 deepmd/pt/train/training.py | 12 +-----------
 1 file changed, 1 insertion(+), 11 deletions(-)

diff --git a/deepmd/pt/train/training.py b/deepmd/pt/train/training.py
index 5dfc0ef99..af5524fe9 100644
--- a/deepmd/pt/train/training.py
+++ b/deepmd/pt/train/training.py
@@ -484,21 +484,11 @@ def collect_single_finetune_params(
                     if i != "_extra_state" and f".{_model_key}." in i
                 ]
                 for item_key in target_keys:
-                    if _new_fitting and ".fitting_net." in item_key:
+                    if _new_fitting and ((".fitting_net." in item_key) or (".out_bias" in item_key) or (".out_std" in item_key)):
                         # print(f'Keep {item_key} in old model!')
                         _new_state_dict[item_key] = (
                             _random_state_dict[item_key].clone().detach()
                         )
-                    elif _new_fitting and ((".out_bias" in item_key) or (".out_std" in item_key)):
-                        new_key = item_key.replace(
-                            f".{_model_key}.", f".{_model_key_from}."
-                        )
-                        if _random_state_dict[item_key].shape[-1] != _origin_state_dict[new_key].shape[-1]:
-                            assert _random_state_dict[item_key].shape[:-1] == _origin_state_dict[new_key].shape[:-1]
-                            _origin_state_dict[new_key] = _origin_state_dict[new_key].expand(_random_state_dict[item_key].shape)
-                        _new_state_dict[item_key] = (
-                            _origin_state_dict[new_key].clone().detach()
-                        )
                     else:
                         new_key = item_key.replace(
                             f".{_model_key}.", f".{_model_key_from}."

From e8140b21671b3c1046a906ea9cc44f8f6257dd56 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Thu, 19 Sep 2024 05:56:11 +0000
Subject: [PATCH 4/8] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 deepmd/pt/train/training.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/deepmd/pt/train/training.py b/deepmd/pt/train/training.py
index af5524fe9..91d32d0ea 100644
--- a/deepmd/pt/train/training.py
+++ b/deepmd/pt/train/training.py
@@ -484,7 +484,11 @@ def collect_single_finetune_params(
                     if i != "_extra_state" and f".{_model_key}." in i
                 ]
                 for item_key in target_keys:
-                    if _new_fitting and ((".fitting_net." in item_key) or (".out_bias" in item_key) or (".out_std" in item_key)):
+                    if _new_fitting and (
+                        (".fitting_net." in item_key)
+                        or (".out_bias" in item_key)
+                        or (".out_std" in item_key)
+                    ):
                         # print(f'Keep {item_key} in old model!')
                         _new_state_dict[item_key] = (
                             _random_state_dict[item_key].clone().detach()

From 55d0982c9151aaf7b587521a26dc2ec73c67af3f Mon Sep 17 00:00:00 2001
From: Chengqian-Zhang <2000011006@stu.pku.edu.cn>
Date: Thu, 19 Sep 2024 11:40:39 +0000
Subject: [PATCH 5/8] Add UT

---
 source/checkpoint                |  1 +
 source/tests/pt/test_training.py | 65 ++++++++++++++++++++++++++++++++
 2 files changed, 66 insertions(+)
 create mode 100644 source/checkpoint

diff --git a/source/checkpoint b/source/checkpoint
new file mode 100644
index 000000000..d6c737351
--- /dev/null
+++ b/source/checkpoint
@@ -0,0 +1 @@
+model.ckpt-1.pt
\ No newline at end of file
diff --git a/source/tests/pt/test_training.py b/source/tests/pt/test_training.py
index 0833200d4..266dfd84d 100644
--- a/source/tests/pt/test_training.py
+++ b/source/tests/pt/test_training.py
@@ -448,5 +448,70 @@ def tearDown(self) -> None:
         DPTrainTest.tearDown(self)
 
 
+class TestPropFintuFromEnerModel(unittest.TestCase):
+    def setUp(self):
+        input_json = str(Path(__file__).parent / "water/se_atten.json")
+        with open(input_json) as f:
+            self.config = json.load(f)
+        data_file = [str(Path(__file__).parent / "water/data/data_0")]
+        self.config["training"]["training_data"]["systems"] = data_file
+        self.config["training"]["validation_data"]["systems"] = data_file
+        self.config["model"] = deepcopy(model_dpa1)
+        self.config["model"]["type_map"] = ["H", "C", "N", "O"]
+        self.config["training"]["numb_steps"] = 1
+        self.config["training"]["save_freq"] = 1
+
+        property_input = str(Path(__file__).parent / "property/input.json")
+        with open(property_input) as f:
+            self.config_property = json.load(f)
+        prop_data_file = [str(Path(__file__).parent / "property/single")]
+        self.config_property["training"]["training_data"]["systems"] = prop_data_file
+        self.config_property["training"]["validation_data"]["systems"] = prop_data_file
+        self.config_property["model"]["descriptor"] = deepcopy(model_dpa1["descriptor"])
+        self.config_property["training"]["numb_steps"] = 1
+        self.config_property["training"]["save_freq"] = 1
+
+    def test_dp_train(self):
+        # test training from scratch
+        trainer = get_trainer(deepcopy(self.config))
+        trainer.run()
+        state_dict_trained = trainer.wrapper.model.state_dict()
+
+        # test fine-tuning using different fitting_net, here using property fitting
+        finetune_model = self.config["training"].get("save_ckpt", "model.ckpt") + ".pt"
+        self.config_property["model"], finetune_links = get_finetune_rules(
+            finetune_model,
+            self.config_property["model"],
+            model_branch="RANDOM",
+        )
+        trainer_finetune = get_trainer(
+            deepcopy(self.config_property),
+            finetune_model=finetune_model,
+            finetune_links=finetune_links,
+        )
+
+        # check parameters
+        state_dict_finetuned = trainer_finetune.wrapper.model.state_dict()
+        for state_key in state_dict_finetuned:
+            if "out_bias" not in state_key and "out_std" not in state_key and "fitting" not in state_key:
+                torch.testing.assert_close(
+                    state_dict_trained[state_key],
+                    state_dict_finetuned[state_key],
+                )
+
+        # check running
+        trainer_finetune.run()
+
+
+    def tearDown(self):
+        for f in os.listdir("."):
+            if f.startswith("model") and f.endswith(".pt"):
+                os.remove(f)
+            if f in ["lcurve.out"]:
+                os.remove(f)
+            if f in ["stat_files"]:
+                shutil.rmtree(f)
+
+
 if __name__ == "__main__":
     unittest.main()

From 580734947b13df62bcdf8ff461ad7c243b99857d Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Thu, 19 Sep 2024 11:42:04 +0000
Subject: [PATCH 6/8] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 source/checkpoint                | 2 +-
 source/tests/pt/test_training.py | 7 +++++--
 2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/source/checkpoint b/source/checkpoint
index d6c737351..aa6f74047 100644
--- a/source/checkpoint
+++ b/source/checkpoint
@@ -1 +1 @@
-model.ckpt-1.pt
\ No newline at end of file
+model.ckpt-1.pt
diff --git a/source/tests/pt/test_training.py b/source/tests/pt/test_training.py
index 266dfd84d..fa9e5c138 100644
--- a/source/tests/pt/test_training.py
+++ b/source/tests/pt/test_training.py
@@ -493,7 +493,11 @@ def test_dp_train(self):
         # check parameters
         state_dict_finetuned = trainer_finetune.wrapper.model.state_dict()
         for state_key in state_dict_finetuned:
-            if "out_bias" not in state_key and "out_std" not in state_key and "fitting" not in state_key:
+            if (
+                "out_bias" not in state_key
+                and "out_std" not in state_key
+                and "fitting" not in state_key
+            ):
                 torch.testing.assert_close(
                     state_dict_trained[state_key],
                     state_dict_finetuned[state_key],
@@ -502,7 +506,6 @@ def test_dp_train(self):
         # check running
         trainer_finetune.run()
-
 
     def tearDown(self):
         for f in os.listdir("."):
             if f.startswith("model") and f.endswith(".pt"):

From 7b966b7726980848165cd28f576967f0e973d8b3 Mon Sep 17 00:00:00 2001
From: Chengqian-Zhang <2000011006@stu.pku.edu.cn>
Date: Thu, 19 Sep 2024 11:46:52 +0000
Subject: [PATCH 7/8] Delete useless file

---
 source/checkpoint | 1 -
 1 file changed, 1 deletion(-)
 delete mode 100644 source/checkpoint

diff --git a/source/checkpoint b/source/checkpoint
deleted file mode 100644
index d6c737351..000000000
--- a/source/checkpoint
+++ /dev/null
@@ -1 +0,0 @@
-model.ckpt-1.pt
\ No newline at end of file

From e51a4f74e8077aa9d95a50ea90010ac3e9df439f Mon Sep 17 00:00:00 2001
From: Chengqian-Zhang <2000011006@stu.pku.edu.cn>
Date: Mon, 23 Sep 2024 07:11:05 +0000
Subject: [PATCH 8/8] change fitting to descriptor

---
 deepmd/pt/train/training.py | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/deepmd/pt/train/training.py b/deepmd/pt/train/training.py
index fb8d92107..9bdc80195 100644
--- a/deepmd/pt/train/training.py
+++ b/deepmd/pt/train/training.py
@@ -484,11 +484,7 @@ def collect_single_finetune_params(
                     if i != "_extra_state" and f".{_model_key}." in i
                 ]
                 for item_key in target_keys:
-                    if _new_fitting and (
-                        (".fitting_net." in item_key)
-                        or (".out_bias" in item_key)
-                        or (".out_std" in item_key)
-                    ):
+                    if _new_fitting and (".descriptor." not in item_key):
                         # print(f'Keep {item_key} in old model!')
                         _new_state_dict[item_key] = (
                             _random_state_dict[item_key].clone().detach()
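
Taken together, the series converges on a simple rule in PATCH 8/8: when fine-tuning attaches a new fitting net, every state-dict entry whose key does not contain ".descriptor." keeps its fresh random initialization (fitting net, out_bias, out_std, ...), and only descriptor parameters are remapped onto the pretrained branch and copied over. The sketch below is a minimal standalone illustration of that key-routing rule, not code from the repository; the collect_params helper, the state-dict keys, and the branch names are all invented for the example.

    import torch

    def collect_params(random_sd, origin_sd, model_key, model_key_from, new_fitting):
        # Keep-vs-remap split as in PATCH 8/8: with a new fitting net, any key
        # outside the descriptor stays randomly initialized; descriptor keys are
        # renamed onto the pretrained branch and copied from it.
        new_sd = {}
        for key in random_sd:
            if new_fitting and ".descriptor." not in key:
                new_sd[key] = random_sd[key].clone().detach()
            else:
                from_key = key.replace(f".{model_key}.", f".{model_key_from}.")
                new_sd[key] = origin_sd[from_key].clone().detach()
        return new_sd

    # Hypothetical two-parameter models, for illustration only.
    random_sd = {
        "model.Default.descriptor.w": torch.zeros(3),
        "model.Default.fitting_net.b": torch.zeros(3),
    }
    origin_sd = {
        "model.Pretrained.descriptor.w": torch.ones(3),
        "model.Pretrained.fitting_net.b": torch.ones(3),
    }
    merged = collect_params(random_sd, origin_sd, "Default", "Pretrained", True)
    assert torch.equal(merged["model.Default.descriptor.w"], torch.ones(3))
    assert torch.equal(merged["model.Default.fitting_net.b"], torch.zeros(3))

This is also consistent with the unit test added in PATCH 5/8, which asserts parameter equality against the pretrained energy model only for keys containing none of "out_bias", "out_std", or "fitting": those are exactly the entries kept at their random initialization rather than copied over.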