From 0c06e6a1f5641ef2ba8e44aede384d36b0392629 Mon Sep 17 00:00:00 2001 From: majorli Date: Tue, 6 Aug 2024 11:47:09 +0800 Subject: [PATCH] update roformer, roberta and widedeep models based on toolbox ByteMLPerf Signed-off-by: majorli --- models/nlp/language_model/roberta/ixrt/README.md | 10 +++++++--- models/nlp/language_model/roformer/ixrt/README.md | 11 +++++++---- models/recommendation/widedeep/ixrt/README.md | 8 +++++--- 3 files changed, 19 insertions(+), 10 deletions(-) diff --git a/models/nlp/language_model/roberta/ixrt/README.md b/models/nlp/language_model/roberta/ixrt/README.md index 5ba6e888..0588c797 100644 --- a/models/nlp/language_model/roberta/ixrt/README.md +++ b/models/nlp/language_model/roberta/ixrt/README.md @@ -9,6 +9,10 @@ Language model pretraining has led to significant performance gains but careful ### Install ```bash +export PROJ_ROOT=/PATH/TO/DEEPSPARKINFERENCE +export MODEL_PATH=${PROJ_ROOT}/models/nlp/language_model/roberta/ixrt +cd ${MODEL_PATH} + pip3 install onnxsim pip3 install py-libnuma==1.2 pip3 install bert @@ -32,8 +36,7 @@ tar xf open_roberta.tar rm -f open_roberta.tar # get roberta-torch-fp32.json -git clone -b iluvatar_general_infer https://github.com/yudefu/ByteMLPerf.git -cp ./ByteMLPerf/byte_infer_perf/general_perf/model_zoo/roberta-torch-fp32.json ./ +wget https://raw.githubusercontent.com/bytedance/ByteMLPerf/main/byte_infer_perf/general_perf/model_zoo/roberta-torch-fp32.json # export onnx python3 export_onnx.py --model_path open_roberta/roberta-base-squad.pt --output_path open_roberta/roberta-torch-fp32.onnx @@ -63,7 +66,8 @@ If you want to evaluate the accuracy of this model, please visit the website: Note: You need to modify the relevant paths in the code to your own correct paths. 
```bash -# Install requirements +# Link and install requirements +ln -s ${PROJ_ROOT}/toolbox/ByteMLPerf ./ pip3 install -r ./ByteMLPerf/byte_infer_perf/general_perf/requirements.txt pip3 install -r ./ByteMLPerf/byte_infer_perf/general_perf/backends/ILUVATAR/requirements.txt mv perf_engine.py ./ByteMLPerf/byte_infer_perf/general_perf/core/perf_engine.py diff --git a/models/nlp/language_model/roformer/ixrt/README.md b/models/nlp/language_model/roformer/ixrt/README.md index ba1e5975..c088cf0f 100644 --- a/models/nlp/language_model/roformer/ixrt/README.md +++ b/models/nlp/language_model/roformer/ixrt/README.md @@ -26,7 +26,9 @@ Dataset: ```bash # Go to path of this model -cd ${PROJ_ROOT}/models/nlp/language_model/roformer/ixrt +export PROJ_ROOT=/PATH/TO/DEEPSPARKINFERENCE +export MODEL_PATH=${PROJ_ROOT}/models/nlp/language_model/roformer/ixrt +cd ${MODEL_PATH} # Download the pretrained model and dataset to 'data' mkdir -p data/ @@ -69,16 +71,17 @@ If you want to evaluate the accuracy of this model, please visit the website: Note: You need to modify the relevant paths in the code to your own correct paths. 
```bash -# Clone ByteMLPerf -git clone -b iluvatar_general_infer https://github.com/yudefu/ByteMLPerf.git +# link ByteMLPerf and install requirements +ln -s ${PROJ_ROOT}/toolbox/ByteMLPerf ./ pip3 install -r ./ByteMLPerf/byte_infer_perf/general_perf/requirements.txt + mv perf_engine.py ./ByteMLPerf/byte_infer_perf/general_perf/core/perf_engine.py -mkdir -p ./ByteMLPerf/byte_infer_perf/general_perf/model_zoo/popular/ # Comment Line102 in compile_backend_iluvatar.py sed -i '102s/build_engine/# build_engine/' ./ByteMLPerf/byte_infer_perf/general_perf/backends/ILUVATAR/compile_backend_iluvatar.py # Move open_roformer +mkdir -p ./ByteMLPerf/byte_infer_perf/general_perf/model_zoo/popular/ mv ./data/open_roformer ./ByteMLPerf/byte_infer_perf/general_perf/model_zoo/popular/ # Setup open_cail2019 dataset diff --git a/models/recommendation/widedeep/ixrt/README.md b/models/recommendation/widedeep/ixrt/README.md index fb01a4d4..350a6da3 100644 --- a/models/recommendation/widedeep/ixrt/README.md +++ b/models/recommendation/widedeep/ixrt/README.md @@ -23,7 +23,9 @@ Dataset: ```bash # Go to path of this model -cd ${PROJ_ROOT}/models/recommendationwidedeep/ixrt +export PROJ_ROOT=/PATH/TO/DEEPSPARKINFERENCE +export MODEL_PATH=${PROJ_ROOT}/models/recommendation/widedeep/ixrt +cd ${MODEL_PATH} # export onnx python3 export_onnx.py --model_path open_wide_deep_saved_model --output_path open_wide_deep_saved_model/widedeep.onnx @@ -55,8 +57,8 @@ If you want to evaluate the accuracy of this model, please visit the website: Note: You need to modify the relevant paths in the code to your own correct paths. ```bash -# Clone ByteMLPerf -git clone -b iluvatar_general_infer https://github.com/yudefu/ByteMLPerf.git +# link and install ByteMLPerf requirements +ln -s ${PROJ_ROOT}/toolbox/ByteMLPerf ./ pip3 install -r ./ByteMLPerf/byte_infer_perf/general_perf/requirements.txt mv perf_engine.py ./ByteMLPerf/byte_infer_perf/general_perf/core/perf_engine.py -- Gitee