diff --git a/ACL_PyTorch/built-in/audio/ConformerOfflineModel/data_gen.py b/ACL_PyTorch/built-in/audio/ConformerOfflineModel/data_gen.py
new file mode 100644
index 0000000000000000000000000000000000000000..437046d03a4e5b4a4773e01937a6d9f3d51e3af5
--- /dev/null
+++ b/ACL_PyTorch/built-in/audio/ConformerOfflineModel/data_gen.py
@@ -0,0 +1,5 @@
+import numpy as np
+
+x, x_lens = np.ones((1, 100, 80), dtype=np.float32), np.array([100])
+np.save('x.npy', x)
+np.save('x_lens.npy', x_lens)
diff --git a/ACL_PyTorch/built-in/audio/ConformerOfflineModel/modify_decoder_onnx.py b/ACL_PyTorch/built-in/audio/ConformerOfflineModel/modify_decoder_onnx.py
new file mode 100644
index 0000000000000000000000000000000000000000..c972709d205696856362cbebb4ba37ec5e445f59
--- /dev/null
+++ b/ACL_PyTorch/built-in/audio/ConformerOfflineModel/modify_decoder_onnx.py
@@ -0,0 +1,35 @@
+# Copyright(C) 2024. Huawei Technologies Co.,Ltd. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import sys
+
+from auto_optimizer import OnnxGraph
+
+
+def main():
+    onnx_path = sys.argv[1]
+    graph = OnnxGraph.parse(onnx_path)
+    graph.remove("/decoder/Clip")
+    gather = graph["/decoder/embedding/Gather"]
+    gather.inputs[1] = "y"
+    graph.update_map()
+    graph.infershape()
+
+    g_sim = graph.simplify()
+    save_path = onnx_path.replace(".onnx", "_modified.onnx")
+    g_sim.save(save_path)
+    print("Modified model saved to ", save_path)
+
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/ACL_PyTorch/built-in/audio/ConformerOfflineModel/modify_encoder_onnx.py b/ACL_PyTorch/built-in/audio/ConformerOfflineModel/modify_encoder_onnx.py
new file mode 100644
index 0000000000000000000000000000000000000000..f9aef35d11b429f671e11557f34084556350d98c
--- /dev/null
+++ b/ACL_PyTorch/built-in/audio/ConformerOfflineModel/modify_encoder_onnx.py
@@ -0,0 +1,35 @@
+# Copyright(C) 2024. Huawei Technologies Co.,Ltd. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
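+# The script below rewrites the exported Conformer encoder ONNX graph: it takes
+# the first ReduceMax node returned by get_nodes('ReduceMax') and pins its
+# 'axes' attribute to [0] (presumably the max over the 1-D x_lens input), then
+# re-runs shape inference and simplification before saving the result as
+# "<name>_modified.onnx".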
+import sys
+
+from auto_optimizer import OnnxGraph
+
+
+def main():
+    onnx_path = sys.argv[1]
+    graph = OnnxGraph.parse(onnx_path)
+    reduce_max = graph.get_nodes('ReduceMax')[0]
+    reduce_max.attrs['axes'] = [0]
+
+    graph.update_map()
+    graph.infershape()
+
+    g_sim = graph.simplify()
+    save_path = onnx_path.replace(".onnx", "_modified.onnx")
+    g_sim.save(save_path)
+    print("Modified model saved to ", save_path)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ACL_PyTorch/built-in/audio/ConformerOfflineModel/precision_test.py b/ACL_PyTorch/built-in/audio/ConformerOfflineModel/precision_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..17cff9212724b7c9abfc0ae9568f63bf06a3aa08
--- /dev/null
+++ b/ACL_PyTorch/built-in/audio/ConformerOfflineModel/precision_test.py
@@ -0,0 +1,66 @@
+import sys
+from pruned_transducer_stateless5.onnx_pretrained import OnnxModel
+
+import numpy as np
+from ais_bench.infer.interface import InferSession
+import torch
+from torch.nn.functional import cosine_similarity
+
+
+# Initialize the ONNX model globally
+onnxmodel = OnnxModel("./exp/encoder-epoch-99-avg-1.onnx", "./exp/decoder-epoch-99-avg-1.onnx", "./exp/joiner-epoch-99-avg-1.onnx")
+
+def is_close_to_ones(x1, atol=1e-04):
+    x2 = torch.ones_like(x1)
+    return torch.allclose(x1, x2, atol=atol)
+
+def precision_test(om_output, onnx_output):
+    result = is_close_to_ones(cosine_similarity(om_output, onnx_output))
+    print("Precision test passed" if result else "Precision test failed")
+
+def run_infer_session(session, inputs, custom_sizes=None):
+    if custom_sizes is not None:
+        return session.infer(inputs, 'dymshape', custom_sizes=custom_sizes)
+    else:
+        return session.infer(inputs)
+
+def evaluate_model(mode, om_path):
+    session = InferSession(0, om_path)
+
+    if mode == 'encoder':
+        x, x_lens = np.random.rand(1, 100, 80).astype(np.float32), np.array([100])
+        output_size = 100000
+        om_outputs = run_infer_session(session, [x, x_lens], custom_sizes=output_size)
+
+        x_tensor, x_lens_tensor = torch.from_numpy(x), torch.from_numpy(x_lens)
+        onnx_output, _ = onnxmodel.run_encoder(x_tensor, x_lens_tensor)
+
+    elif mode == 'decoder':
+        y = np.random.randint(0, 10, size=(1, 2)).astype(np.int64)
+        om_outputs = run_infer_session(session, [y])
+
+        y_tensor = torch.from_numpy(y)
+        onnx_output = onnxmodel.run_decoder(y_tensor)
+
+    elif mode == 'joiner':
+        enc, dec = np.random.rand(1, 512).astype(np.float32), np.random.rand(1, 512).astype(np.float32)
+        om_outputs = run_infer_session(session, [enc, dec])
+
+        enc_tensor, dec_tensor = torch.from_numpy(enc), torch.from_numpy(dec)
+        onnx_output = onnxmodel.run_joiner(enc_tensor, dec_tensor)
+
+    else:
+        raise ValueError("Invalid mode")
+
+    om_output = torch.from_numpy(om_outputs[0])
+    precision_test(om_output, onnx_output)
+
+if __name__ == "__main__":
+    if len(sys.argv) != 3:
+        print("Usage: