Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ venv*
.vscode/**
_build
_templates
API_CC
doc/API_CC/
doc/api_py/
doc/api_core/
doc/api_c/
Expand Down
5 changes: 5 additions & 0 deletions deepmd/pt/model/task/fitting.py
Original file line number Diff line number Diff line change
Expand Up @@ -657,3 +657,8 @@ def _forward_common(
outs = torch.where(mask[:, :, None], outs, 0.0)
results.update({self.var_name: outs})
return results

@torch.jit.export
def get_task_dim(self) -> int:
    """Get the output dimension of the fitting net.

    Returns
    -------
    int
        The per-frame output dimension, as reported by ``self._net_out_dim()``.
    """
    # Exported so the method remains callable on the TorchScript-compiled model.
    return self._net_out_dim()
247 changes: 247 additions & 0 deletions source/api_cc/include/DeepTensorPT.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,247 @@
// SPDX-License-Identifier: LGPL-3.0-or-later
#pragma once

#include <torch/script.h>
#include <torch/torch.h>

#include <cassert>
#include <functional>
#include <string>
#include <vector>

#include "DeepTensor.h"

namespace deepmd {
/**
 * @brief PyTorch implementation for Deep Tensor.
 **/
class DeepTensorPT : public DeepTensorBase {
 public:
  /**
   * @brief Deep Tensor constructor without initialization.
   **/
  DeepTensorPT();
  virtual ~DeepTensorPT();
  /**
   * @brief Deep Tensor constructor with initialization.
   * @param[in] model The name of the frozen model file.
   * @param[in] gpu_rank The GPU rank. Default is 0.
   * @param[in] name_scope Name scopes of operations.
   **/
  DeepTensorPT(const std::string& model,
               const int& gpu_rank = 0,
               const std::string& name_scope = "");
  /**
   * @brief Initialize the Deep Tensor.
   * @param[in] model The name of the frozen model file.
   * @param[in] gpu_rank The GPU rank. Default is 0.
   * @param[in] name_scope Name scopes of operations.
   **/
  void init(const std::string& model,
            const int& gpu_rank = 0,
            const std::string& name_scope = "");

 private:
  /**
   * @brief Evaluate the global tensor and component-wise force and virial.
   * @param[out] global_tensor The global tensor to evaluate.
   * @param[out] force The component-wise force of the global tensor, size
   * odim x natoms x 3.
   * @param[out] virial The component-wise virial of the global tensor, size
   * odim x 9.
   * @param[out] atom_tensor The atomic tensor value of the model, size
   * natoms x odim.
   * @param[out] atom_virial The component-wise atomic virial of the global
   * tensor, size odim x natoms x 9.
   * @param[in] coord The coordinates of atoms. The array should be of size
   * natoms x 3.
   * @param[in] atype The atom types. The list should contain natoms ints.
   * @param[in] box The cell of the region. The array should be of size 9.
   * @param[in] request_deriv Whether to request the derivative of the global
   * tensor, including force and virial.
   **/
  template <typename VALUETYPE>
  void compute(std::vector<VALUETYPE>& global_tensor,
               std::vector<VALUETYPE>& force,
               std::vector<VALUETYPE>& virial,
               std::vector<VALUETYPE>& atom_tensor,
               std::vector<VALUETYPE>& atom_virial,
               const std::vector<VALUETYPE>& coord,
               const std::vector<int>& atype,
               const std::vector<VALUETYPE>& box,
               const bool request_deriv);
  /**
   * @brief Evaluate the global tensor and component-wise force and virial,
   * using a user-provided neighbour list.
   * @param[out] global_tensor The global tensor to evaluate.
   * @param[out] force The component-wise force of the global tensor, size
   * odim x natoms x 3.
   * @param[out] virial The component-wise virial of the global tensor, size
   * odim x 9.
   * @param[out] atom_tensor The atomic tensor value of the model, size
   * natoms x odim.
   * @param[out] atom_virial The component-wise atomic virial of the global
   * tensor, size odim x natoms x 9.
   * @param[in] coord The coordinates of atoms. The array should be of size
   * natoms x 3.
   * @param[in] atype The atom types. The list should contain natoms ints.
   * @param[in] box The cell of the region. The array should be of size 9.
   * @param[in] nghost The number of ghost atoms.
   * @param[in] inlist The input neighbour list.
   * @param[in] request_deriv Whether to request the derivative of the global
   * tensor, including force and virial.
   **/
  template <typename VALUETYPE>
  void compute(std::vector<VALUETYPE>& global_tensor,
               std::vector<VALUETYPE>& force,
               std::vector<VALUETYPE>& virial,
               std::vector<VALUETYPE>& atom_tensor,
               std::vector<VALUETYPE>& atom_virial,
               const std::vector<VALUETYPE>& coord,
               const std::vector<int>& atype,
               const std::vector<VALUETYPE>& box,
               const int nghost,
               const InputNlist& inlist,
               const bool request_deriv);

 public:
  /**
   * @brief Get the cutoff radius.
   * @return The cutoff radius.
   **/
  double cutoff() const {
    assert(inited);
    return rcut;
  }
  /**
   * @brief Get the number of types.
   * @return The number of types.
   **/
  int numb_types() const {
    assert(inited);
    return ntypes;
  }
  /**
   * @brief Get the output dimension.
   * @return The output dimension.
   **/
  int output_dim() const {
    assert(inited);
    return odim;
  }
  /**
   * @brief Get the list of sel types.
   * @return The list of sel types.
   */
  const std::vector<int>& sel_types() const {
    assert(inited);
    return sel_type;
  }
  /**
   * @brief Get the type map (element name of the atom types) of this model.
   * @param[out] type_map The type map of this model.
   **/
  void get_type_map(std::string& type_map);

  /**
   * @brief Evaluate the global tensor and component-wise force and virial.
   * @param[out] global_tensor The global tensor to evaluate.
   * @param[out] force The component-wise force of the global tensor, size
   * odim x natoms x 3.
   * @param[out] virial The component-wise virial of the global tensor, size
   * odim x 9.
   * @param[out] atom_tensor The atomic tensor value of the model, size
   * natoms x odim.
   * @param[out] atom_virial The component-wise atomic virial of the global
   * tensor, size odim x natoms x 9.
   * @param[in] coord The coordinates of atoms. The array should be of size
   * natoms x 3.
   * @param[in] atype The atom types. The list should contain natoms ints.
   * @param[in] box The cell of the region. The array should be of size 9.
   * @param[in] request_deriv Whether to request the derivative of the global
   * tensor, including force and virial.
   * @{
   **/
  void computew(std::vector<double>& global_tensor,
                std::vector<double>& force,
                std::vector<double>& virial,
                std::vector<double>& atom_tensor,
                std::vector<double>& atom_virial,
                const std::vector<double>& coord,
                const std::vector<int>& atype,
                const std::vector<double>& box,
                const bool request_deriv);
  void computew(std::vector<float>& global_tensor,
                std::vector<float>& force,
                std::vector<float>& virial,
                std::vector<float>& atom_tensor,
                std::vector<float>& atom_virial,
                const std::vector<float>& coord,
                const std::vector<int>& atype,
                const std::vector<float>& box,
                const bool request_deriv);
  /** @} */
  /**
   * @brief Evaluate the global tensor and component-wise force and virial,
   * using a user-provided neighbour list.
   * @param[out] global_tensor The global tensor to evaluate.
   * @param[out] force The component-wise force of the global tensor, size
   * odim x natoms x 3.
   * @param[out] virial The component-wise virial of the global tensor, size
   * odim x 9.
   * @param[out] atom_tensor The atomic tensor value of the model, size
   * natoms x odim.
   * @param[out] atom_virial The component-wise atomic virial of the global
   * tensor, size odim x natoms x 9.
   * @param[in] coord The coordinates of atoms. The array should be of size
   * natoms x 3.
   * @param[in] atype The atom types. The list should contain natoms ints.
   * @param[in] box The cell of the region. The array should be of size 9.
   * @param[in] nghost The number of ghost atoms.
   * @param[in] inlist The input neighbour list.
   * @param[in] request_deriv Whether to request the derivative of the global
   * tensor, including force and virial.
   * @{
   **/
  void computew(std::vector<double>& global_tensor,
                std::vector<double>& force,
                std::vector<double>& virial,
                std::vector<double>& atom_tensor,
                std::vector<double>& atom_virial,
                const std::vector<double>& coord,
                const std::vector<int>& atype,
                const std::vector<double>& box,
                const int nghost,
                const InputNlist& inlist,
                const bool request_deriv);
  void computew(std::vector<float>& global_tensor,
                std::vector<float>& force,
                std::vector<float>& virial,
                std::vector<float>& atom_tensor,
                std::vector<float>& atom_virial,
                const std::vector<float>& coord,
                const std::vector<int>& atype,
                const std::vector<float>& box,
                const int nghost,
                const InputNlist& inlist,
                const bool request_deriv);
  /** @} */

 private:
  // Intra-/inter-op thread counts applied to the PyTorch runtime.
  int num_intra_nthreads, num_inter_nthreads;
  // Whether init() has completed; guards the accessors above.
  bool inited;
  // Model cutoff radius.
  double rcut;
  // Number of atom types known to the model.
  int ntypes;
  // Output dimension of the tensor; mutable because it is filled in lazily
  // from the loaded module inside const evaluation paths.
  mutable int odim;
  // Types for which the atomic tensor is defined.
  std::vector<int> sel_type;
  std::string name_scope;
  // PyTorch module and device management
  mutable torch::jit::script::Module module;
  int gpu_id;
  bool gpu_enabled;
  NeighborListData nlist_data;
  // Neighbor list tensors for efficient computation
  at::Tensor firstneigh_tensor;

  /**
   * @brief Translate PyTorch exceptions to the DeePMD-kit exception.
   * @param[in] f The function to run.
   * @example translate_error([&](){...});
   */
  void translate_error(std::function<void()> f);
};

}  // namespace deepmd
9 changes: 8 additions & 1 deletion source/api_cc/src/DeepTensor.cc
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,9 @@
#ifdef BUILD_TENSORFLOW
#include "DeepTensorTF.h"
#endif
#ifdef BUILD_PYTORCH
#include "DeepTensorPT.h"
#endif
#include "common.h"

using namespace deepmd;
Expand Down Expand Up @@ -38,7 +41,11 @@ void DeepTensor::init(const std::string &model,
throw deepmd::deepmd_exception("TensorFlow backend is not built.");
#endif
} else if (deepmd::DPBackend::PyTorch == backend) {
throw deepmd::deepmd_exception("PyTorch backend is not supported yet");
#ifdef BUILD_PYTORCH
dt = std::make_shared<deepmd::DeepTensorPT>(model, gpu_rank, name_scope_);
#else
throw deepmd::deepmd_exception("PyTorch backend is not built.");
#endif
} else if (deepmd::DPBackend::Paddle == backend) {
throw deepmd::deepmd_exception("PaddlePaddle backend is not supported yet");
} else {
Expand Down
Loading