Skip to content

Commit

Permalink
simplify compile code
Browse files Browse the repository at this point in the history
  • Loading branch information
HydrogenSulfate committed Sep 29, 2024
1 parent f1cae59 commit a83fb63
Show file tree
Hide file tree
Showing 4 changed files with 23 additions and 58 deletions.
4 changes: 2 additions & 2 deletions doc/freeze/freeze.md
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ The output model is called `model_branch1.pth`, which is the specifically frozen
:::{tab-item} Paddle {{ paddle_icon }}

```bash
$ dp --pd freeze -o model.json
$ dp --pd freeze -o model
DEEPMD INFO Paddle inference model has been exported to: model.json(.pdiparams)
```

Expand All @@ -48,7 +48,7 @@ In [multi-task mode](../train/multi-task-training-pt.md), you need to choose one
to specify which model branch you want to freeze:

```bash
$ dp --pd freeze -o model_branch1.json --head CHOSEN_BRANCH
$ dp --pd freeze -o model_branch1 --head CHOSEN_BRANCH
```

The output model is called `model_branch1.json`, which is the frozen model corresponding to the `CHOSEN_BRANCH` head.
6 changes: 3 additions & 3 deletions doc/install/install-from-source.md
Original file line number Diff line number Diff line change
Expand Up @@ -145,7 +145,7 @@ One should remember to activate the virtual environment every time he/she uses D

Check the compiler version on your machine

```
```bash
gcc --version
```

Expand Down Expand Up @@ -422,10 +422,10 @@ cmake -DENABLE_PYTORCH=TRUE -DUSE_PT_PYTHON_LIBS=TRUE -DCMAKE_INSTALL_PREFIX=$de

:::{tab-item} Paddle {{ paddle_icon }}

I assume you have installed the Paddle (either Python or C++ interface) to `$paddle_root`, then execute CMake
I assume you have compiled the Paddle inference library (C++ interface) into `$PADDLE_INFERENCE_DIR`, then execute CMake

```bash
cmake -DENABLE_PYTORCH=TRUE -DCMAKE_PREFIX_PATH=$paddle_root -DCMAKE_INSTALL_PREFIX=$deepmd_root ..
cmake -DENABLE_PADDLE=ON -DCMAKE_PREFIX_PATH=$PADDLE_INFERENCE_DIR -DPADDLE_INFERENCE_DIR=$PADDLE_INFERENCE_DIR -DCMAKE_INSTALL_PREFIX=$deepmd_root ..
```

:::
Expand Down
2 changes: 2 additions & 0 deletions source/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,9 @@ if(ENABLE_PADDLE)
link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}gflags/lib")
link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}xxhash/lib")
link_directories("${PADDLE_INFERENCE_DIR}/paddle/lib")
# if (USE_ROCM_TOOLKIT)
add_definitions(-D_GLIBCXX_USE_CXX11_ABI=1)
# endif()
endif(ENABLE_PADDLE)

if(BUILD_TESTING)
Expand Down
69 changes: 16 additions & 53 deletions source/install/build_cc_pd.sh
Original file line number Diff line number Diff line change
Expand Up @@ -22,70 +22,33 @@ export LAMMPS_DIR="/workspace/hesensen/deepmd_backend/deepmd_paddle_new/source/b
export LAMMPS_SOURCE_ROOT="/workspace/hesensen/deepmd_backend/deepmd_paddle_new/source/build_lammps/lammps-stable_29Aug2024/"

# 设置推理时的 GPU 卡号
export CUDA_VISIBLE_DEVICES=3
# export FLAGS_benchmark=1
# export GLOG_v=6
export CUDA_VISIBLE_DEVICES=1

# PADDLE_DIR 设置为第二步 clone下来的 Paddle 目录
export PADDLE_DIR="/workspace/hesensen/PaddleScience_enn_debug/Paddle/"

# DEEPMD_DIR 设置为本项目的根目录
export DEEPMD_DIR="/workspace/hesensen/deepmd_backend/deepmd_paddle_new/"
# deepmd_root 设置为本项目的根目录
export deepmd_root="/workspace/hesensen/deepmd_backend/deepmd_paddle_new/"

# PADDLE_INFERENCE_DIR 设置为第二步编译得到的 Paddle 推理库目录
export PADDLE_INFERENCE_DIR="/workspace/hesensen/PaddleScience_enn_debug/Paddle/build/paddle_inference_install_dir/"

# TENSORFLOW_DIR 设置为 tensorflow 的安装目录,可用 pip show tensorflow 确定
# export TENSORFLOW_DIR="/path/to/tensorflow"

export LD_LIBRARY_PATH=${PADDLE_DIR}/paddle/fluid/pybind/:$LD_LIBRARY_PATH
export LD_LIBRARY_PATH=${DEEPMD_DIR}/deepmd/op:$LD_LIBRARY_PATH
export LD_LIBRARY_PATH=${deepmd_root}/deepmd/op:$LD_LIBRARY_PATH
export LD_LIBRARY_PATH=${PADDLE_INFERENCE_DIR}/paddle/lib:$LD_LIBRARY_PATH
export LD_LIBRARY_PATH=${PADDLE_INFERENCE_DIR}/third_party/install/mkldnn/lib:$LD_LIBRARY_PATH
export LD_LIBRARY_PATH=${PADDLE_INFERENCE_DIR}/third_party/install/mklml/lib:$LD_LIBRARY_PATH
export LD_LIBRARY_PATH=${DEEPMD_DIR}/source/build:$LD_LIBRARY_PATH
export LIBRARY_PATH=${DEEPMD_DIR}/deepmd/op:$LIBRARY_PATH
# export FLAGS_check_nan_inf=1
# cd ${DEEPMD_DIR}/source
# rm -rf build # 若改动CMakeLists.txt,则需要打开该注释
# mkdir build
# cd -

# DEEPMD_INSTALL_DIR 设置为 deepmd-lammps 的目标安装目录,可自行设置任意路径
# export DEEPMD_INSTALL_DIR="path/to/deepmd_root"

# 开始编译
# cmake -DCMAKE_INSTALL_PREFIX=${DEEPMD_INSTALL_DIR} \
# -DUSE_CUDA_TOOLKIT=TRUE \
# -DTENSORFLOW_ROOT=${TENSORFLOW_DIR} \
# -DPADDLE_LIB=${PADDLE_INFERENCE_DIR} \
# -DFLOAT_PREC=low ..
# make -j4 && make install
# make lammps

# cd ${LAMMPS_DIR}/src/
# \cp -r ${DEEPMD_DIR}/source/build/USER-DEEPMD .
# make yes-kspace
# make yes-extra-fix
# make yes-user-deepmd
# make serial -j
# export PATH=${LAMMPS_DIR}/src:$PATH

# cd ${DEEPMD_DIR}/examples/water/lmp
export LD_LIBRARY_PATH=${deepmd_root}/source/build:$LD_LIBRARY_PATH

# lmp_serial -in in.lammps
cd ${deepmd_root}/source
rm -rf build # 若改动CMakeLists.txt,则需要打开该注释
mkdir build
cd -

BUILD_TMP_DIR=${SCRIPT_PATH}/../build
mkdir -p ${BUILD_TMP_DIR}
cd ${BUILD_TMP_DIR}
cmake -D ENABLE_TENSORFLOW=OFF \
-D ENABLE_PYTORCH=OFF \
-D ENABLE_PADDLE=ON \
-D PADDLE_LIB=${PADDLE_INFERENCE_DIR} \
cmake -D ENABLE_PADDLE=ON \
-D PADDLE_INFERENCE_DIR=${PADDLE_INFERENCE_DIR} \
-D CMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} \
-D USE_TF_PYTHON_LIBS=TRUE \
-D LAMMPS_SOURCE_ROOT=${LAMMPS_SOURCE_ROOT} \
-D ENABLE_IPI=OFF \
${CUDA_ARGS} \
-D LAMMPS_VERSION=stable_29Aug2024 \
..
Expand All @@ -95,23 +58,23 @@ cmake --install .
#------------------
echo "Congratulations! DeePMD-kit has been installed at ${INSTALL_PREFIX}"

cd ${DEEPMD_DIR}/source
cd ${deepmd_root}/source
cd build
make lammps
cd ${LAMMPS_DIR}/src/
\cp -r ${DEEPMD_DIR}/source/build/USER-DEEPMD .
\cp -r ${deepmd_root}/source/build/USER-DEEPMD .
make no-kspace
make yes-kspace
make no-extra-fix
make yes-extra-fix
make no-user-deepmd
make yes-user-deepmd
# make serial -j
make mpi -j 20
make mpi -j 10
export PATH=${LAMMPS_DIR}/src:$PATH

cd ${DEEPMD_DIR}/examples/water/lmp
cd ${deepmd_root}/examples/water/lmp

echo "START INFERENCE..."
# lmp_serial -in paddle_in.lammps 2>&1 | tee paddle_infer.log
mpirun -np 1 lmp_mpi -in paddle_in.lammps 2>&1 | tee paddle_infer.log
mpirun -np 2 lmp_mpi -in paddle_in.lammps 2>&1 | tee paddle_infer.log

0 comments on commit a83fb63

Please sign in to comment.