diff --git a/.github/workflows/todo.yml b/.github/workflows/todo.yml
new file mode 100644
index 0000000000..2608bb1071
--- /dev/null
+++ b/.github/workflows/todo.yml
@@ -0,0 +1,20 @@
+name: TODO workflow
+on:
+  push:
+    branches:
+      - devel
+jobs:
+  build:
+    if: github.repository_owner == 'deepmodeling'
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - name: Run tdg-github-action
+        uses: ribtoks/tdg-github-action@master
+        with:
+          TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          REPO: ${{ github.repository }}
+          SHA: ${{ github.sha }}
+          REF: ${{ github.ref }}
+          EXCLUDE_PATTERN: "(source/3rdparty|.git)/.*"
+          COMMENT_ON_ISSUES: 1
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 892962b2fe..a611e819f0 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -12,7 +12,6 @@ repos:
       - id: check-json
       - id: check-added-large-files
         args: ['--maxkb=1024', '--enforce-all']
-        # TODO: remove the following after resolved
         exclude: |
           (?x)^(
               source/tests/infer/dipolecharge_e.pbtxt|
diff --git a/deepmd/common.py b/deepmd/common.py
index c776975591..84f98c6318 100644
--- a/deepmd/common.py
+++ b/deepmd/common.py
@@ -71,8 +71,9 @@
 )


-# TODO this is not a good way to do things. This is some global variable to which
-# TODO anyone can write and there is no good way to keep track of the changes
+# TODO: refactor data_requirement to make it not a global variable
+# this is not a good way to do things. This is some global variable to which
+# anyone can write and there is no good way to keep track of the changes
 data_requirement = {}


@@ -180,9 +181,10 @@ def make_default_mesh(pbc: bool, mixed_type: bool) -> np.ndarray:
     return default_mesh


-# TODO maybe rename this to j_deprecated and only warn about deprecated keys,
-# TODO if the deprecated_key argument is left empty function puppose is only custom
-# TODO error since dict[key] already raises KeyError when the key is missing
+# TODO: rename j_must_have to j_deprecated and only warn about deprecated keys
+# maybe rename this to j_deprecated and only warn about deprecated keys,
+# if the deprecated_key argument is left empty, the function's purpose is only a custom
+# error since dict[key] already raises KeyError when the key is missing
 def j_must_have(
     jdata: Dict[str, "_DICT_VAL"], key: str, deprecated_key: List[str] = []
 ) -> "_DICT_VAL":
@@ -238,7 +240,7 @@ def j_loader(filename: Union[str, Path]) -> Dict[str, Any]:
         raise TypeError("config file must be json, or yaml/yml")


-# TODO port completely to pathlib when all callers are ported
+# TODO port expand_sys_str completely to pathlib when all callers are ported
 def expand_sys_str(root_dir: Union[str, Path]) -> List[str]:
     """Recursively iterate over directories taking those that contain `type.raw` file.

diff --git a/deepmd/dpmodel/fitting/general_fitting.py b/deepmd/dpmodel/fitting/general_fitting.py
index 3b0d022562..5681f5bf0c 100644
--- a/deepmd/dpmodel/fitting/general_fitting.py
+++ b/deepmd/dpmodel/fitting/general_fitting.py
@@ -313,7 +313,8 @@ def _call_common(
             )
         xx = descriptor
         if self.remove_vaccum_contribution is not None:
-            # TODO: Idealy, the input for vaccum should be computed;
+            # TODO: compute the input for vaccum when setting remove_vaccum_contribution
+            # Ideally, the input for vaccum should be computed;
             # we consider it as always zero for convenience.
             # Needs a compute_input_stats for vaccum passed from the
             # descriptor.
diff --git a/deepmd/dpmodel/utils/network.py b/deepmd/dpmodel/utils/network.py
index 817448ac50..661358ed70 100644
--- a/deepmd/dpmodel/utils/network.py
+++ b/deepmd/dpmodel/utils/network.py
@@ -90,7 +90,8 @@ def __call__(self):
         return self.count


-# TODO: should be moved to otherwhere...
+# TODO: move save_dp_model and load_dp_model to a separate module
+# should be moved elsewhere...
 def save_dp_model(filename: str, model_dict: dict) -> None:
     """Save a DP model to a file in the native format.

diff --git a/deepmd/dpmodel/utils/update_sel.py b/deepmd/dpmodel/utils/update_sel.py
index f36e63651d..48463b5743 100644
--- a/deepmd/dpmodel/utils/update_sel.py
+++ b/deepmd/dpmodel/utils/update_sel.py
@@ -17,5 +17,5 @@ def neighbor_stat(self) -> Type[NeighborStat]:
         return NeighborStat

     def hook(self, min_nbor_dist, max_nbor_size):
-        # TODO: save to the model
+        # TODO: save to the model in UpdateSel.hook
         pass
diff --git a/deepmd/pt/entrypoints/main.py b/deepmd/pt/entrypoints/main.py
index 46d284a395..b9c4971116 100644
--- a/deepmd/pt/entrypoints/main.py
+++ b/deepmd/pt/entrypoints/main.py
@@ -285,9 +285,7 @@ def freeze(FLAGS):
     torch.jit.save(
         model,
         FLAGS.output,
-        {
-            # TODO: _extra_files
-        },
+        {},
     )


diff --git a/deepmd/pt/loss/ener.py b/deepmd/pt/loss/ener.py
index 1d70528e88..f29c3231f1 100644
--- a/deepmd/pt/loss/ener.py
+++ b/deepmd/pt/loss/ener.py
@@ -77,7 +77,7 @@ def __init__(
         self.has_f = (start_pref_f != 0.0 and limit_pref_f != 0.0) or inference
         self.has_v = (start_pref_v != 0.0 and limit_pref_v != 0.0) or inference

-        # TODO need support for atomic energy and atomic pref
+        # TODO EnergyStdLoss needs support for atomic energy and atomic pref
         self.has_ae = (start_pref_ae != 0.0 and limit_pref_ae != 0.0) or inference
         self.has_pf = (start_pref_pf != 0.0 and limit_pref_pf != 0.0) or inference

diff --git a/deepmd/pt/loss/ener_spin.py b/deepmd/pt/loss/ener_spin.py
index b94acf26ea..1f55dcc5df 100644
--- a/deepmd/pt/loss/ener_spin.py
+++ b/deepmd/pt/loss/ener_spin.py
@@ -47,7 +47,7 @@ def __init__(
         self.has_fr = (start_pref_fr != 0.0 and limit_pref_fr != 0.0) or inference
         self.has_fm = (start_pref_fm != 0.0 and limit_pref_fm != 0.0) or inference

-        # TODO need support for virial, atomic energy and atomic pref
+        # TODO EnergySpinLoss needs support for virial, atomic energy and atomic pref
         self.has_v = (start_pref_v != 0.0 and limit_pref_v != 0.0) or inference
         self.has_ae = (start_pref_ae != 0.0 and limit_pref_ae != 0.0) or inference
         self.has_pf = (start_pref_pf != 0.0 and limit_pref_pf != 0.0) or inference
diff --git a/deepmd/pt/model/task/fitting.py b/deepmd/pt/model/task/fitting.py
index 4637178318..c8edee5b94 100644
--- a/deepmd/pt/model/task/fitting.py
+++ b/deepmd/pt/model/task/fitting.py
@@ -457,7 +457,8 @@ def _forward_common(
     ):
         xx = descriptor
         if self.remove_vaccum_contribution is not None:
-            # TODO: Idealy, the input for vaccum should be computed;
+            # TODO: compute the input for vaccum when remove_vaccum_contribution is set
+            # Ideally, the input for vaccum should be computed;
             # we consider it as always zero for convenience.
             # Needs a compute_input_stats for vaccum passed from the
             # descriptor.
diff --git a/deepmd/pt/train/training.py b/deepmd/pt/train/training.py
index fc293f70ec..2056b9b305 100644
--- a/deepmd/pt/train/training.py
+++ b/deepmd/pt/train/training.py
@@ -558,14 +558,16 @@ def get_loss(loss_params, start_lr, _ntypes, _model):
                 output_device=LOCAL_RANK,
             )

-        # TODO ZD add lr warmups for multitask
+        # TODO add lr warmups for multitask
+        # author: iProzd
         def warm_up_linear(step, warmup_steps):
             if step < warmup_steps:
                 return step / warmup_steps
             else:
                 return self.lr_exp.value(step - warmup_steps) / self.lr_exp.start_lr

-        # TODO ZD add optimizers for multitask
+        # TODO add optimizers for multitask
+        # author: iProzd
         if self.opt_type == "Adam":
             self.optimizer = torch.optim.Adam(
                 self.wrapper.parameters(), lr=self.lr_exp.start_lr
diff --git a/deepmd/pt/utils/update_sel.py b/deepmd/pt/utils/update_sel.py
index 2d077acac1..8c2d0699f2 100644
--- a/deepmd/pt/utils/update_sel.py
+++ b/deepmd/pt/utils/update_sel.py
@@ -17,5 +17,5 @@ def neighbor_stat(self) -> Type[NeighborStat]:
         return NeighborStat

     def hook(self, min_nbor_dist, max_nbor_size):
-        # TODO: save to the model
+        # TODO: save to the model in UpdateSel.hook
        pass
diff --git a/deepmd/tf/descriptor/descriptor.py b/deepmd/tf/descriptor/descriptor.py
index dbf260bfe8..82b09c95fb 100644
--- a/deepmd/tf/descriptor/descriptor.py
+++ b/deepmd/tf/descriptor/descriptor.py
@@ -102,9 +102,6 @@ def get_dim_rot_mat_1(self) -> int:
         int
             the first dimension of the rotation matrix
         """
-        # TODO: I think this method should be implemented as it's called by dipole and
-        # polar fitting network. However, currently not all descriptors have this
-        # method.
         raise NotImplementedError

     def get_nlist(self) -> Tuple[tf.Tensor, tf.Tensor, List[int], List[int]]:
@@ -121,8 +118,6 @@ def get_nlist(self) -> Tuple[tf.Tensor, tf.Tensor, List[int], List[int]]:
         sel_r : list[int]
             The number of neighbors with only radial information
         """
-        # TODO: I think this method should be implemented as it's called by energy
-        # model. However, se_ar and hybrid doesn't have this method.
         raise NotImplementedError

     @abstractmethod
diff --git a/deepmd/tf/descriptor/se_a.py b/deepmd/tf/descriptor/se_a.py
index 4635554610..8b6ae3539b 100644
--- a/deepmd/tf/descriptor/se_a.py
+++ b/deepmd/tf/descriptor/se_a.py
@@ -1426,7 +1426,8 @@ def serialize(self, suffix: str = "") -> dict:
             raise NotImplementedError("spin is unsupported")
         assert self.davg is not None
         assert self.dstd is not None
-        # TODO: not sure how to handle type embedding - type embedding is not a model parameter,
+        # TODO: tf: handle type embedding in DescrptSeA.serialize
+        # not sure how to handle type embedding - type embedding is not a model parameter,
         # but instead a part of the input data. Maybe the interface should be refactored...

         return {
diff --git a/deepmd/tf/descriptor/se_a_mask.py b/deepmd/tf/descriptor/se_a_mask.py
index 55b34adf48..ace8a47bbc 100644
--- a/deepmd/tf/descriptor/se_a_mask.py
+++ b/deepmd/tf/descriptor/se_a_mask.py
@@ -249,10 +249,9 @@ def compute_input_stats(
         **kwargs
             Additional keyword arguments.
         """
-        """
-        TODO: Since not all input atoms are real in se_a_mask,
-        statistics should be reimplemented for se_a_mask descriptor.
-        """
+        # TODO: implement compute_input_stats for DescrptSeAMask
+        # Since not all input atoms are real in se_a_mask,
+        # statistics should be reimplemented for se_a_mask descriptor.
         self.davg = None
         self.dstd = None

diff --git a/deepmd/tf/descriptor/se_r.py b/deepmd/tf/descriptor/se_r.py
index 9f88ebe37d..8ef48c0de2 100644
--- a/deepmd/tf/descriptor/se_r.py
+++ b/deepmd/tf/descriptor/se_r.py
@@ -766,7 +766,8 @@ def serialize(self, suffix: str = "") -> dict:
             raise NotImplementedError("spin is unsupported")
         assert self.davg is not None
         assert self.dstd is not None
-        # TODO: not sure how to handle type embedding - type embedding is not a model parameter,
+        # TODO: tf: handle type embedding in DescrptSeR.serialize
+        # not sure how to handle type embedding - type embedding is not a model parameter,
         # but instead a part of the input data. Maybe the interface should be refactored...
         return {
             "@class": "Descriptor",
diff --git a/deepmd/tf/env.py b/deepmd/tf/env.py
index 0b16c8758a..8cc1cacad1 100644
--- a/deepmd/tf/env.py
+++ b/deepmd/tf/env.py
@@ -157,7 +157,8 @@ def dlopen_library(module: str, filename: str):
     r"(final)_layer_type_(\d+)/(matrix)|"
     r"(final)_layer/(bias)|"
     r"(final)_layer_type_(\d+)/(bias)|"
-    # TODO: not sure how to parse for shared layers...
+    # TODO: support extracting parameters for shared layers
+    # not sure how to parse for shared layers...
     # layer_name
     r"share_.+_type_\d/matrix|"
     r"share_.+_type_\d/bias|"
diff --git a/deepmd/tf/fit/dipole.py b/deepmd/tf/fit/dipole.py
index f503789308..978fd958fb 100644
--- a/deepmd/tf/fit/dipole.py
+++ b/deepmd/tf/fit/dipole.py
@@ -355,7 +355,7 @@ def serialize(self, suffix: str) -> dict:
             "dim_descrpt": self.dim_descrpt,
             "embedding_width": self.dim_rot_mat_1,
             # very bad design: type embedding is not passed to the class
-            # TODO: refactor the class
+            # TODO: refactor the class for type embedding and dipole fitting
             "mixed_types": False,
             "dim_out": 3,
             "neuron": self.n_neuron,
@@ -365,7 +365,7 @@ def serialize(self, suffix: str) -> dict:
             "exclude_types": [],
             "nets": self.serialize_network(
                 ntypes=self.ntypes,
-                # TODO: consider type embeddings
+                # TODO: consider type embeddings in dipole fitting
                 ndim=1,
                 in_dim=self.dim_descrpt,
                 out_dim=self.dim_rot_mat_1,
diff --git a/deepmd/tf/fit/dos.py b/deepmd/tf/fit/dos.py
index aef134da92..292db8d5b4 100644
--- a/deepmd/tf/fit/dos.py
+++ b/deepmd/tf/fit/dos.py
@@ -701,7 +701,7 @@ def serialize(self, suffix: str = "") -> dict:
             "ntypes": self.ntypes,
             "dim_descrpt": self.dim_descrpt,
             # very bad design: type embedding is not passed to the class
-            # TODO: refactor the class
+            # TODO: refactor the class for DOSFitting and type embedding
             "mixed_types": False,
             "dim_out": self.numb_dos,
             "neuron": self.n_neuron,
@@ -715,7 +715,7 @@ def serialize(self, suffix: str = "") -> dict:
             "exclude_types": [],
             "nets": self.serialize_network(
                 ntypes=self.ntypes,
-                # TODO: consider type embeddings
+                # TODO: consider type embeddings for DOSFitting
                 ndim=1,
                 in_dim=self.dim_descrpt + self.numb_fparam + self.numb_aparam,
                 out_dim=self.numb_dos,
diff --git a/deepmd/tf/fit/ener.py b/deepmd/tf/fit/ener.py
index f8f5c3b346..b391b00052 100644
--- a/deepmd/tf/fit/ener.py
+++ b/deepmd/tf/fit/ener.py
@@ -893,7 +893,7 @@ def serialize(self, suffix: str = "") -> dict:
             "ntypes": self.ntypes,
             "dim_descrpt": self.dim_descrpt,
             # very bad design: type embedding is not passed to the class
-            # TODO: refactor the class
+            # TODO: refactor the class for energy fitting and type embedding
             "mixed_types": False,
             "dim_out": 1,
             "neuron": self.n_neuron,
@@ -912,7 +912,7 @@ def serialize(self, suffix: str = "") -> dict:
             "exclude_types": [],
             "nets": self.serialize_network(
                 ntypes=self.ntypes,
-                # TODO: consider type embeddings
+                # TODO: consider type embeddings for energy fitting
                 ndim=1,
                 in_dim=self.dim_descrpt + self.numb_fparam + self.numb_aparam,
                 neuron=self.n_neuron,
diff --git a/deepmd/tf/fit/polar.py b/deepmd/tf/fit/polar.py
index 41ea989521..21b9587b88 100644
--- a/deepmd/tf/fit/polar.py
+++ b/deepmd/tf/fit/polar.py
@@ -545,7 +545,7 @@ def serialize(self, suffix: str) -> dict:
             "dim_descrpt": self.dim_descrpt,
             "embedding_width": self.dim_rot_mat_1,
             # very bad design: type embedding is not passed to the class
-            # TODO: refactor the class
+            # TODO: refactor the class for polar fitting and type embedding
             "mixed_types": False,
             "dim_out": 3,
             "neuron": self.n_neuron,
@@ -558,7 +558,7 @@ def serialize(self, suffix: str) -> dict:
             "shift_diag": self.shift_diag,
             "nets": self.serialize_network(
                 ntypes=self.ntypes,
-                # TODO: consider type embeddings
+                # TODO: consider type embeddings for polar fitting
                 ndim=1,
                 in_dim=self.dim_descrpt,
                 out_dim=self.dim_rot_mat_1,
diff --git a/deepmd/tf/infer/deep_tensor.py b/deepmd/tf/infer/deep_tensor.py
index 9e8acf8241..59fdab7cd1 100644
--- a/deepmd/tf/infer/deep_tensor.py
+++ b/deepmd/tf/infer/deep_tensor.py
@@ -412,7 +412,6 @@ def eval_full(
         if ghost_map is not None:
             # add the value of ghost atoms to real atoms
             force = np.reshape(force, [nframes * nout, -1, 3])
-            # TODO: is there some way not to use for loop?
             for ii in range(nframes * nout):
                 np.add.at(force[ii], ghost_map, force[ii, nloc:])
         if atomic:
diff --git a/deepmd/tf/loss/ener.py b/deepmd/tf/loss/ener.py
index 48a13319e4..baa4aa3e02 100644
--- a/deepmd/tf/loss/ener.py
+++ b/deepmd/tf/loss/ener.py
@@ -120,7 +120,6 @@ def __init__(
                 "atom_pref", 1, atomic=True, must=False, high_prec=False, repeat=3
             )
         # drdq: the partial derivative of atomic coordinates w.r.t. generalized coordinates
-        # TODO: could numb_generalized_coord decided from the training data?
         if self.has_gf > 0:
             add_data_requirement(
                 "drdq",
diff --git a/deepmd/tf/model/linear.py b/deepmd/tf/model/linear.py
index da866ccc5f..ae1b0b5c78 100644
--- a/deepmd/tf/model/linear.py
+++ b/deepmd/tf/model/linear.py
@@ -54,7 +54,6 @@ def __init__(self, models: List[dict], weights: List[float], **kwargs):
             self.weights = [1 / len(models) for _ in range(len(models))]
         elif weights == "sum":
             self.weights = [1 for _ in range(len(models))]
-            # TODO: add more weights, for example, so-called committee models
         else:
             raise ValueError(f"Invalid weights {weights}")

diff --git a/deepmd/tf/train/trainer.py b/deepmd/tf/train/trainer.py
index c8668174fd..125b795d2e 100644
--- a/deepmd/tf/train/trainer.py
+++ b/deepmd/tf/train/trainer.py
@@ -296,8 +296,6 @@ def build(self, data=None, stop_batch=0, origin_type_map=None, suffix=""):
                 )

                 # neighbor_stat is moved to train.py as duplicated
-                # TODO: this is a simple fix but we should have a clear
-                # architecture to call neighbor stat
             else:
                 self.model.enable_compression()

diff --git a/deepmd/tf/utils/batch_size.py b/deepmd/tf/utils/batch_size.py
index 8436934cee..33f1ec0da0 100644
--- a/deepmd/tf/utils/batch_size.py
+++ b/deepmd/tf/utils/batch_size.py
@@ -35,6 +35,4 @@ def is_oom_error(self, e: Exception) -> bool:
         e : Exception
             Exception
         """
-        # TODO: it's very slow to catch OOM error; I don't know what TF is doing here
-        # but luckily we only need to catch once
         return isinstance(e, (tf.errors.ResourceExhaustedError, OutOfMemoryError))
diff --git a/deepmd/tf/utils/finetune.py b/deepmd/tf/utils/finetune.py
index 38824b6954..3d11130ba7 100644
--- a/deepmd/tf/utils/finetune.py
+++ b/deepmd/tf/utils/finetune.py
@@ -100,7 +100,7 @@ def replace_model_params_with_pretrained_model(
         ):
             target_para = pretrained_jdata["model"][config_key]
             cur_para = jdata["model"][config_key]
-            # keep some params that are irrelevant to model structures (need to discuss) TODO
+            # TODO: keep some params that are irrelevant to model structures (need to discuss)
             if "trainable" in cur_para.keys():
                 target_para["trainable"] = cur_para["trainable"]
             log.info(f"Change the '{config_key}' from {cur_para!s} to {target_para!s}.")
diff --git a/deepmd/tf/utils/graph.py b/deepmd/tf/utils/graph.py
index f09250aa9f..3ed43343fa 100644
--- a/deepmd/tf/utils/graph.py
+++ b/deepmd/tf/utils/graph.py
@@ -22,7 +22,6 @@
 )


-# TODO (JZ): I think in this file we can merge some duplicated lines into one method...
 def load_graph_def(model_file: str) -> Tuple[tf.Graph, tf.GraphDef]:
     """Load graph as well as the graph_def from the frozen model(model_file).

diff --git a/deepmd/tf/utils/multi_init.py b/deepmd/tf/utils/multi_init.py
index 2e3c43c069..aafa9461b0 100644
--- a/deepmd/tf/utils/multi_init.py
+++ b/deepmd/tf/utils/multi_init.py
@@ -164,7 +164,7 @@ def replace_model_params_with_frz_multi_model(
 def _change_sub_config(jdata: Dict[str, Any], src_jdata: Dict[str, Any], sub_key: str):
     target_para = src_jdata[sub_key]
     cur_para = jdata[sub_key]
-    # keep some params that are irrelevant to model structures (need to discuss) TODO
+    # TODO: keep some params that are irrelevant to model structures (need to discuss)
     if "trainable" in cur_para.keys():
         target_para["trainable"] = cur_para["trainable"]
     log.info(f"Change the '{sub_key}' from {cur_para!s} to {target_para!s}.")
diff --git a/deepmd/tf/utils/update_sel.py b/deepmd/tf/utils/update_sel.py
index bed6274f56..db0420dde8 100644
--- a/deepmd/tf/utils/update_sel.py
+++ b/deepmd/tf/utils/update_sel.py
@@ -24,8 +24,6 @@ def neighbor_stat(self) -> Type[NeighborStat]:
         return NeighborStat

     def hook(self, min_nbor_dist, max_nbor_size):
         # moved from traier.py as duplicated
-        # TODO: this is a simple fix but we should have a clear
-        # architecture to call neighbor stat
         tf.constant(
             min_nbor_dist,
             name="train_attr/min_nbor_dist",
diff --git a/deepmd/utils/batch_size.py b/deepmd/utils/batch_size.py
index c85806458f..b35d9833d5 100644
--- a/deepmd/utils/batch_size.py
+++ b/deepmd/utils/batch_size.py
@@ -51,7 +51,6 @@ class AutoBatchSize(ABC):

     def __init__(self, initial_batch_size: int = 1024, factor: float = 2.0) -> None:
         # See also PyTorchLightning/pytorch-lightning#1638
-        # TODO: discuss a proper initial batch size
         self.current_batch_size = initial_batch_size
         DP_INFER_BATCH_SIZE = int(os.environ.get("DP_INFER_BATCH_SIZE", 0))
         if DP_INFER_BATCH_SIZE > 0:
diff --git a/deepmd/utils/data_system.py b/deepmd/utils/data_system.py
index 0c74abfed1..640083bc33 100644
--- a/deepmd/utils/data_system.py
+++ b/deepmd/utils/data_system.py
@@ -670,7 +670,6 @@ def print_summary(
                 % (
                     _format_name_length(system_dirs[ii], sys_width),
                     natoms[ii],
-                    # TODO batch size * nbatches = number of structures
                     batch_size[ii],
                     nbatches[ii],
                     sys_probs[ii],
diff --git a/deepmd/utils/path.py b/deepmd/utils/path.py
index 5887e91850..afe14703a0 100644
--- a/deepmd/utils/path.py
+++ b/deepmd/utils/path.py
@@ -39,7 +39,6 @@ def __new__(cls, path: str, mode: str = "r"):
                 return super().__new__(DPOSPath)
             elif os.path.isfile(path.split("#")[0]):
                 # assume h5 if it is not dir
-                # TODO: check if it is a real h5? or just check suffix?
                 return super().__new__(DPH5Path)
             raise FileNotFoundError("%s not found" % path)
         return super().__new__(cls)
@@ -217,7 +216,6 @@ def glob(self, pattern: str) -> List["DPPath"]:
             list of paths
         """
         # currently DPOSPath will only derivative DPOSPath
-        # TODO: discuss if we want to mix DPOSPath and DPH5Path?
         return [type(self)(p, mode=self.mode) for p in self.path.glob(pattern)]

     def rglob(self, pattern: str) -> List["DPPath"]:
diff --git a/pyproject.toml b/pyproject.toml
index 84cc7237bc..128364249a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,7 +1,7 @@
 [build-system]
 requires = [
+    # TODO: unpin the upper bound when scikit-build dynamic metadata API is stable
     # dynamic metadata API is still unstable
-    # TODO: unpin the upper bound when it is stable
     "scikit-build-core>=0.5,<0.9,!=0.6.0",
     "packaging",
 ]
@@ -134,7 +134,7 @@ test-command = [
 test-extras = ["cpu", "test", "lmp", "ipi"]
 build = ["cp310-*"]
 skip = ["*-win32", "*-manylinux_i686", "*-musllinux*"]
-# TODO: uncomment when CUDA 11 is deprecated
+# TODO: uncomment to use the latest image when CUDA 11 is deprecated
 # manylinux-x86_64-image = "manylinux_2_28"
 manylinux-x86_64-image = "quay.io/pypa/manylinux_2_28_x86_64:2022-11-19-1b19e81"
 manylinux-aarch64-image = "manylinux_2_28"
diff --git a/source/api_cc/include/common.h b/source/api_cc/include/common.h
index 4743336e0c..2010780a6c 100644
--- a/source/api_cc/include/common.h
+++ b/source/api_cc/include/common.h
@@ -13,7 +13,6 @@ namespace deepmd {

 typedef double ENERGYTYPE;

-// TODO: currently we only implement TF; reserve for future use
 enum DPBackend { TensorFlow, PyTorch, Paddle, Unknown };

 struct NeighborListData {
diff --git a/source/api_cc/src/DeepPot.cc b/source/api_cc/src/DeepPot.cc
index 442e2d90cc..498f35f46b 100644
--- a/source/api_cc/src/DeepPot.cc
+++ b/source/api_cc/src/DeepPot.cc
@@ -1,12 +1,11 @@
 // SPDX-License-Identifier: LGPL-3.0-or-later
 #include "DeepPot.h"

-#include "common.h"
-// TODO: only include when TF backend is built
 #include
 #include

 #include "AtomMap.h"
+#include "common.h"
 #ifdef BUILD_TENSORFLOW
 #include "DeepPotTF.h"
 #endif
diff --git a/source/op/tabulate_multi_device.cc b/source/op/tabulate_multi_device.cc
index 6a70f60a96..50267df556 100644
--- a/source/op/tabulate_multi_device.cc
+++ b/source/op/tabulate_multi_device.cc
@@ -191,7 +191,7 @@ class TabulateFusionSeAOp : public OpKernel {
                 errors::InvalidArgument("Dim of input should be 3"));
     TensorShape descriptor_shape;
     descriptor_shape.AddDim(em_tensor.shape().dim_size(0));
-    descriptor_shape.AddDim(4);  // TODO: be careful here;
+    descriptor_shape.AddDim(4);  // be careful here;
     descriptor_shape.AddDim(last_layer_size);
     int context_output_index = 0;
     Tensor* descriptor_tensor = NULL;
@@ -390,7 +390,7 @@ class TabulateFusionSeAttenOp : public OpKernel {
                 errors::InvalidArgument("Dim of input should be 2"));
     TensorShape descriptor_shape;
     descriptor_shape.AddDim(em_tensor.shape().dim_size(0));
-    descriptor_shape.AddDim(4);  // TODO: be careful here;
+    descriptor_shape.AddDim(4);  // be careful here;
     descriptor_shape.AddDim(last_layer_size);
     int context_output_index = 0;
     Tensor* descriptor_tensor = NULL;
@@ -786,8 +786,7 @@ class TabulateFusionSeROp : public OpKernel {
                 errors::InvalidArgument("Dim of input should be 2"));
     TensorShape descriptor_shape;
     descriptor_shape.AddDim(em_tensor.shape().dim_size(0));
-    descriptor_shape.AddDim(
-        em_tensor.shape().dim_size(1));  // TODO: be careful here;
+    descriptor_shape.AddDim(em_tensor.shape().dim_size(1));  // be careful here;
     descriptor_shape.AddDim(last_layer_size);
     int context_output_index = 0;
     Tensor* descriptor_tensor = NULL;